# Data2VecVision model configuration (Hugging Face `transformers`).
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    """Configuration for a Data2VecVision model, including the decode and
    auxiliary head attributes used for semantic segmentation."""

    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
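# Usage sketch (illustrative, not part of the original module). The class is
# also exposed through the public `transformers` API:
#
#   from transformers import Data2VecVisionConfig
#
#   config = Data2VecVisionConfig(image_size=384)
#   assert config.model_type == "data2vec-vision"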
# MGP-STR processor (Hugging Face `transformers`): wraps a ViT image processor
# and character/BPE/word-piece tokenizers for scene text recognition.
import warnings

from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum

from ...processing_utils import ProcessorMixin


if is_torch_available():
    import torch


class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]

    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            # keep the decoding head with the highest confidence
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            # confidence is the product of per-step max probabilities up to EOS
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
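# Usage sketch (illustrative): pairing the processor with its model for scene
# text recognition. The checkpoint name is the one published for MGP-STR.
#
#   from transformers import MgpstrProcessor, MgpstrForSceneTextRecognition
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   outputs = model(pixel_values)
#   text = processor.batch_decode(outputs.logits)["generated_text"]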
# Training determinism test from the `diffusers` test suite: one training step
# with a DDPM scheduler must match one with a DDIM scheduler of equal config.
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        # the clip_sample value was obscured in the source; True is assumed here
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
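# To run just this determinism check locally (sketch; the test-file path is an
# assumption), note that @slow tests are gated behind the RUN_SLOW flag:
#   RUN_SLOW=1 python -m pytest tests/test_training.py -k training_step_equality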
# Conversion script: original SpeechT5 HiFi-GAN vocoder checkpoint ->
# Hugging Face `SpeechT5HifiGan` (transformers).
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    # stats.npy holds the mel-spectrogram normalization statistics (mean, scale)
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
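# Example invocation (illustrative paths; the flags are defined by the parser above):
#   python convert_hifigan.py \
#       --checkpoint_path hifigan.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan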
# Utility module for the Faster R-CNN visual-feature pipeline used by the
# lxmert/visual_bert research examples: config loading, download caching,
# archive extraction, and image I/O helpers.
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile

import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load


try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False


try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"


def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    # read the Visual Genome class and attribute vocabularies
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r


class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.upper()] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError

            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs


def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"


def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()


def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path


def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename


def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path


def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data


def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
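# Usage sketch (illustrative): these helpers are typically combined to fetch a
# detection config and tensorize an input image. The checkpoint name below is
# an assumption based on the research project this file accompanies.
#
#   config = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")
#   img = img_tensorize("http://images.cocodataset.org/val2017/000000039769.jpg")
#   batches = list(chunk([img], batch=1))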
# Singly linked list implementation, with test functions and an interactive demo.
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
# Deprecation shim for the old `src/transformers/generation_tf_utils.py` module.
import warnings

from .generation import TFGenerationMixin


class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
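# The deprecation message above spells out the supported replacement import:
#
#   from transformers import TFGenerationMixin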
# Tests for the YOLOS image processor (Hugging Face `transformers` test suite).
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import YolosImageProcessor


class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # compute the height/width the processor should produce after resizing
        # the shortest edge to size["shortest_edge"]
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
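# Usage sketch (illustrative) of the processor these tests exercise, outside
# the test harness; the image path is a placeholder:
#
#   from PIL import Image
#   from transformers import YolosImageProcessor
#
#   processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
#   inputs = processor(images=Image.open("cats.png"), return_tensors="pt")
#   print(inputs["pixel_values"].shape)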
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__UpperCAmelCase = '\\n\n'
__UpperCAmelCase = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
__UpperCAmelCase = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
def __lowercase ( self : int ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) ,reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] ,)
def __lowercase ( self : str ,A : List[str] ,A : str ,A : int = 16 ,A : bool = True ,A : Union[str, Any]=None ):
'''simple docstring'''
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
UpperCAmelCase__ : str = """cuda"""
else:
UpperCAmelCase__ : Optional[Any] = """cuda""" if torch.cuda.is_available() else """cpu"""
UpperCAmelCase__ : List[Any] = AutoModelForCausalLM.from_pretrained(A )
UpperCAmelCase__ : Dict = model.to(A )
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(A )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
UpperCAmelCase__ : Dict = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(A ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
UpperCAmelCase__ : int = model.config.max_length - 1
else:
UpperCAmelCase__ : List[Any] = model.config.max_length
UpperCAmelCase__ : Tuple = tokenizer(
A ,add_special_tokens=A ,padding=A ,truncation=A ,max_length=A ,return_tensors="""pt""" ,return_attention_mask=A ,).to(A )
UpperCAmelCase__ : Optional[Any] = encodings["""input_ids"""]
UpperCAmelCase__ : Dict = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) ,1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) ,2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : str = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0 ,len(A ) ,A ) ):
UpperCAmelCase__ : List[Any] = min(start_index + batch_size ,len(A ) )
UpperCAmelCase__ : Optional[int] = encoded_texts[start_index:end_index]
UpperCAmelCase__ : int = attn_masks[start_index:end_index]
if add_start_token:
UpperCAmelCase__ : str = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(A )
UpperCAmelCase__ : Tuple = torch.cat([bos_tokens_tensor, encoded_batch] ,dim=1 )
UpperCAmelCase__ : Any = torch.cat(
[torch.ones(bos_tokens_tensor.size() ,dtype=torch.int64 ).to(A ), attn_mask] ,dim=1 )
UpperCAmelCase__ : Tuple = encoded_batch
with torch.no_grad():
UpperCAmelCase__ : str = model(A ,attention_mask=A ).logits
UpperCAmelCase__ : Optional[int] = out_logits[..., :-1, :].contiguous()
UpperCAmelCase__ : List[Any] = labels[..., 1:].contiguous()
UpperCAmelCase__ : Optional[Any] = attn_mask[..., 1:].contiguous()
UpperCAmelCase__ : int = torch.exp(
(loss_fct(shift_logits.transpose(1 ,2 ) ,A ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(A )}
| 65 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = TypeVar("DatasetType", Dataset, IterableDataset)
def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[List[float]] = None ,__UpperCamelCase: Optional[int] = None ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" ,):
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(__UpperCamelCase ):
if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ):
if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'is an empty dataset dictionary.' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." )
if i == 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = (
(Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(__UpperCamelCase ,__UpperCamelCase ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase )
else:
return _interleave_iterable_datasets(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase )
def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: int = 0 ,):
"""simple docstring"""
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(__UpperCamelCase ):
if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ):
if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'is an empty dataset dictionary.' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." )
if i == 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = (
(Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(__UpperCamelCase ,__UpperCamelCase ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
else:
return _concatenate_iterable_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
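# Usage sketch (assuming the two helpers above correspond to the public
# `datasets.interleave_datasets` and `datasets.concatenate_datasets` APIs):
#     mixed = interleave_datasets([d1, d2], probabilities=[0.8, 0.2],
#                                 stopping_strategy="all_exhausted")
#     joined = concatenate_datasets([d1, d2], axis=0)  # axis=1 joins columns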
| 28 | 0 |
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> str:
return "".join([hex(SCREAMING_SNAKE_CASE )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE )] )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(SCREAMING_SNAKE_CASE ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
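# Round-trip sketch (assuming the two helpers above are the reference
# base16_encode and base16_decode):
#     base16_encode(b"HELLO")      # -> "48454C4C4F"
#     base16_decode("48454C4C4F")  # -> b"HELLO"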
| 66 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A, 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(A, 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(A, 'num_attention_heads' ) )
class _a :
'''simple docstring'''
def __init__( self, A, A=13, A=32, A=2, A=3, A=640, A=4, A="silu", A=3, A=32, A=0.1, A=0.1, A=0.1, A=0.02, A=True, A=True, A=10, A=None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : str = patch_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : int = last_hidden_size
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = conv_kernel_size
SCREAMING_SNAKE_CASE : Optional[Any] = output_stride
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : int = is_training
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = scope
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = MobileViTModel(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.num_labels
SCREAMING_SNAKE_CASE : Tuple = MobileViTForImageClassification(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(A, labels=A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE : str = MobileViTForSemanticSegmentation(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : str = model(A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
SCREAMING_SNAKE_CASE : int = model(A, labels=A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
A : List[Any] = (
{
'''feature-extraction''': MobileViTModel,
'''image-classification''': MobileViTForImageClassification,
'''image-segmentation''': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A : Optional[int] = False
A : Dict = False
A : List[Any] = False
A : Optional[int] = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = MobileViTModelTester(self )
SCREAMING_SNAKE_CASE : str = MobileViTConfigTester(self, config_class=A, has_text_modality=A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A )
SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ['pixel_values']
self.assertListEqual(arg_names[:1], A )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
def check_hidden_states_output(A, A, A ):
SCREAMING_SNAKE_CASE : Any = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : List[str] = 5
self.assertEqual(len(A ), A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE : int = 2
for i in range(len(A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(A, A, A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
check_hidden_states_output(A, A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : int = MobileViTModel.from_pretrained(A )
self.assertIsNotNone(A )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(A )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : Dict = prepare_img()
SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**A )
# verify the logits
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, A )
SCREAMING_SNAKE_CASE : int = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = model.to(A )
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**A )
SCREAMING_SNAKE_CASE : List[str] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, A )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[
[[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
[[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
[[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
], device=A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : List[str] = model.to(A )
SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**A )
SCREAMING_SNAKE_CASE : int = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE : Dict = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, A )
SCREAMING_SNAKE_CASE : Tuple = image_processor.post_process_semantic_segmentation(outputs=A )
SCREAMING_SNAKE_CASE : Any = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, A )
| 28 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''camembert'''
def __init__( self : Optional[int] ,__A : Optional[int]=3_0522 ,__A : List[Any]=768 ,__A : Optional[int]=12 ,__A : int=12 ,__A : Dict=3072 ,__A : Union[str, Any]="gelu" ,__A : Union[str, Any]=0.1 ,__A : Any=0.1 ,__A : int=512 ,__A : Union[str, Any]=2 ,__A : Union[str, Any]=0.02 ,__A : Union[str, Any]=1e-12 ,__A : Dict=1 ,__A : List[str]=0 ,__A : int=2 ,__A : Tuple="absolute" ,__A : Union[str, Any]=True ,__A : Optional[Any]=None ,**__A : Optional[Any] ,) -> Optional[int]:
super().__init__(pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,**__A )
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = hidden_act
_lowercase = intermediate_size
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = type_vocab_size
_lowercase = initializer_range
_lowercase = layer_norm_eps
_lowercase = position_embedding_type
_lowercase = use_cache
_lowercase = classifier_dropout
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowercase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 67 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase_ = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase_ = {
"distilbert-base-uncased": 5_1_2,
"distilbert-base-uncased-distilled-squad": 5_1_2,
"distilbert-base-cased": 5_1_2,
"distilbert-base-cased-distilled-squad": 5_1_2,
"distilbert-base-german-cased": 5_1_2,
"distilbert-base-multilingual-cased": 5_1_2,
}
UpperCamelCase_ = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
A : Optional[int] = ['''input_ids''', '''attention_mask''']
A : List[Any] = DistilBertTokenizer
def __init__( self, A=None, A=None, A=True, A="[UNK]", A="[SEP]", A="[PAD]", A="[CLS]", A="[MASK]", A=True, A=None, **A, ):
'''simple docstring'''
super().__init__(
A, tokenizer_file=A, do_lower_case=A, unk_token=A, sep_token=A, pad_token=A, cls_token=A, mask_token=A, tokenize_chinese_chars=A, strip_accents=A, **A, )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase', A ) != do_lower_case
or normalizer_state.get('strip_accents', A ) != strip_accents
or normalizer_state.get('handle_chinese_chars', A ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(A, normalizer_state.pop('type' ) )
SCREAMING_SNAKE_CASE : Optional[Any] = do_lower_case
SCREAMING_SNAKE_CASE : List[str] = strip_accents
SCREAMING_SNAKE_CASE : List[str] = tokenize_chinese_chars
SCREAMING_SNAKE_CASE : Dict = normalizer_class(**A )
SCREAMING_SNAKE_CASE : Union[str, Any] = do_lower_case
def UpperCamelCase_ ( self, A, A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self, A, A = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self, A, A = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(A, name=A )
return tuple(A )
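# Usage sketch (standard transformers API): the fast tokenizer is typically
# loaded with DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased");
# a single sequence is laid out as [CLS] tokens [SEP], and the token-type
# helper above returns all zeros for it.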
| 28 | 0 |
def lowercase__ ( A_: str ) -> list:
"""simple docstring"""
__UpperCAmelCase =[0] * len(A_ )
for i in range(1 , len(A_ ) ):
# use last results for better performance - dynamic programming
__UpperCAmelCase =prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
__UpperCAmelCase =prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
__UpperCAmelCase =j
return prefix_result
def lowercase__ ( A_: str ) -> int:
"""simple docstring"""
return max(prefix_function(A_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
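# Worked example (standard KMP prefix function): for "aabaaab" the prefix
# array is [0, 1, 0, 1, 2, 2, 3], so the longest border reported by the
# second helper is 3.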
| 68 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
UpperCamelCase_ = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = mock.Mock()
SCREAMING_SNAKE_CASE : List[Any] = 500
SCREAMING_SNAKE_CASE : Optional[Any] = {}
SCREAMING_SNAKE_CASE : Any = HTTPError
SCREAMING_SNAKE_CASE : Any = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=A ) as mock_head:
SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
# This checks that the fake head request was indeed called
mock_head.assert_called()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ViTImageProcessor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaises(A ):
# config is in subfolder, the following should not work without specifying the subfolder
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' )
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(
'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor' )
self.assertIsNotNone(A )
@is_staging_test
class _a ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-image-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-image-processor' )
except HTTPError:
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub('test-image-processor', use_auth_token=self._token )
SCREAMING_SNAKE_CASE : int = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A, repo_id='test-image-processor', push_to_hub=A, use_auth_token=self._token )
SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token )
SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A, repo_id='valid_org/test-image-processor-org', push_to_hub=A, use_auth_token=self._token )
SCREAMING_SNAKE_CASE : Dict = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE : Tuple = CustomImageProcessor.from_pretrained(A )
image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'}, )
SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(
F"{USER}/test-dynamic-image-processor", trust_remote_code=A )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor' )
| 28 | 0 |
'''simple docstring'''
import cmath
import math
def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> complex:
__snake_case = math.radians(_UpperCAmelCase )
__snake_case = math.radians(_UpperCAmelCase )
# Convert voltage and current to rectangular form
__snake_case = cmath.rect(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = cmath.rect(_UpperCAmelCase , _UpperCAmelCase )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
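# Worked example: V = 100 V at 0 degrees and I = 5 A at 0 degrees gives
# cmath.rect(100, 0) * cmath.rect(5, 0) = (500+0j) volt-amperes.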
| 69 |
'''simple docstring'''
class _a :
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = val
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if self.val:
if val < self.val:
if self.left is None:
SCREAMING_SNAKE_CASE : Optional[int] = Node(A )
else:
self.left.insert(A )
elif val > self.val:
if self.right is None:
SCREAMING_SNAKE_CASE : int = Node(A )
else:
self.right.insert(A )
else:
SCREAMING_SNAKE_CASE : int = val
def lowercase__( __UpperCamelCase: Optional[int] ,__UpperCamelCase: List[str] ):
"""simple docstring"""
if root:
inorder(root.left ,__UpperCamelCase )
res.append(root.val )
inorder(root.right ,__UpperCamelCase )
def lowercase__( __UpperCamelCase: List[Any] ):
"""simple docstring"""
if len(__UpperCamelCase ) == 0:
return arr
SCREAMING_SNAKE_CASE : Optional[int] = Node(arr[0] )
for i in range(1 ,len(__UpperCamelCase ) ):
root.insert(arr[i] )
# Traverse BST in order.
SCREAMING_SNAKE_CASE : Dict = []
inorder(__UpperCamelCase ,__UpperCamelCase )
return res
if __name__ == "__main__":
print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
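# The demo call above builds a BST and reads it back in order, so it should
# print [1, 2, 3, 9, 10, 13, 14]; expected cost is O(n log n) for the inserts
# plus O(n) for the traversal (O(n^2) worst case on already-sorted input).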
| 28 | 0 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
lowerCamelCase : Any = ""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
class A( tr.AbstractTransform ):
'''simple docstring'''
def __init__( self : List[Any] , A_ : str = " " ) -> str:
"""simple docstring"""
lowerCamelCase_ = sentence_delimiter
def a__ ( self : int , A_ : str ) -> Union[str, Any]:
"""simple docstring"""
return list(A_ )
def a__ ( self : List[str] , A_ : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = []
for sent_idx, sentence in enumerate(A_ ):
chars.extend(self.process_string(A_ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(A_ ) - 1:
chars.append(self.sentence_delimiter )
return chars
lowerCamelCase : str = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
lowerCamelCase : Dict = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
lowerCamelCase : int = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
lowerCamelCase : Optional[Any] = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
lowerCamelCase : List[Any] = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A( datasets.Metric ):
'''simple docstring'''
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
] , )
def a__ ( self : Optional[Any] , A_ : str , A_ : str , A_ : Optional[Any]=False ) -> Tuple:
"""simple docstring"""
if concatenate_texts:
return jiwer.compute_measures(
A_ , A_ , truth_transform=A_ , hypothesis_transform=A_ , )["wer"]
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for prediction, reference in zip(A_ , A_ ):
lowerCamelCase_ = jiwer.compute_measures(
A_ , A_ , truth_transform=A_ , hypothesis_transform=A_ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
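# Note: CER = (S + D + I) / (S + D + C); the incremental branch above sums
# substitutions, deletions and insertions per pair and divides by the
# reference length S + D + C (jiwer reports correct characters as "hits").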
| 70 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def lowercase__( *__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Optional[Union[Dict, Any]] = None ,__UpperCamelCase: Dict=True ,__UpperCamelCase: List[Any]=2 ):
"""simple docstring"""
from .. import __version__
SCREAMING_SNAKE_CASE : int = take_from
SCREAMING_SNAKE_CASE : Optional[int] = ()
if not isinstance(args[0] ,__UpperCamelCase ):
SCREAMING_SNAKE_CASE : List[str] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse(__UpperCamelCase ):
raise ValueError(
f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
f" version {__version__} is >= {version_name}" )
SCREAMING_SNAKE_CASE : Tuple = None
if isinstance(__UpperCamelCase ,__UpperCamelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(__UpperCamelCase ),)
SCREAMING_SNAKE_CASE : Dict = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
elif hasattr(__UpperCamelCase ,__UpperCamelCase ):
values += (getattr(__UpperCamelCase ,__UpperCamelCase ),)
SCREAMING_SNAKE_CASE : Optional[int] = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
elif deprecated_kwargs is None:
SCREAMING_SNAKE_CASE : Dict = f"`{attribute}` is deprecated and will be removed in version {version_name}."
if warning is not None:
SCREAMING_SNAKE_CASE : Dict = warning + ' ' if standard_warn else ''
warnings.warn(warning + message ,__UpperCamelCase ,stacklevel=__UpperCamelCase )
if isinstance(__UpperCamelCase ,__UpperCamelCase ) and len(__UpperCamelCase ) > 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.getouterframes(inspect.currentframe() )[1]
SCREAMING_SNAKE_CASE : Any = call_frame.filename
SCREAMING_SNAKE_CASE : Tuple = call_frame.lineno
SCREAMING_SNAKE_CASE : Union[str, Any] = call_frame.function
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )
if len(__UpperCamelCase ) == 0:
return
elif len(__UpperCamelCase ) == 1:
return values[0]
return values
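# Usage sketch (mirrors the diffusers `deprecate` helper; names illustrative):
#     scale = deprecate("scale", "1.0.0", "Use `guidance_scale` instead.",
#                       take_from=kwargs)
# This pops the deprecated kwarg, emits a warning (FutureWarning in the
# reference code), and returns the old value during the grace period.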
| 28 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Tuple ="longformer"
def __init__( self ,_snake_case = 5_12 ,_snake_case = 2 ,_snake_case = 1 ,_snake_case = 0 ,_snake_case = 2 ,_snake_case = 3_05_22 ,_snake_case = 7_68 ,_snake_case = 12 ,_snake_case = 12 ,_snake_case = 30_72 ,_snake_case = "gelu" ,_snake_case = 0.1 ,_snake_case = 0.1 ,_snake_case = 5_12 ,_snake_case = 2 ,_snake_case = 0.02 ,_snake_case = 1E-12 ,_snake_case = False ,**_snake_case ,):
super().__init__(pad_token_id=_snake_case ,**_snake_case )
UpperCAmelCase_ : Union[str, Any] = attention_window
UpperCAmelCase_ : str = sep_token_id
UpperCAmelCase_ : List[str] = bos_token_id
UpperCAmelCase_ : Optional[Any] = eos_token_id
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : List[str] = intermediate_size
UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob
UpperCAmelCase_ : int = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = max_position_embeddings
UpperCAmelCase_ : Optional[Any] = type_vocab_size
UpperCAmelCase_ : Dict = initializer_range
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : Optional[int] = onnx_export
class _snake_case (__SCREAMING_SNAKE_CASE):
def __init__( self ,_snake_case ,_snake_case = "default" ,_snake_case = None ):
super().__init__(_snake_case ,_snake_case ,_snake_case )
UpperCAmelCase_ : int = True
@property
def UpperCamelCase__ ( self ):
if self.task == "multiple-choice":
UpperCAmelCase_ : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ : Union[str, Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = super().outputs
if self.task == "default":
UpperCAmelCase_ : str = {0: "batch"}
return outputs
@property
def UpperCamelCase__ ( self ):
return 1E-4
@property
def UpperCamelCase__ ( self ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset ,14 )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = -1 ,_snake_case = -1 ,_snake_case = False ,_snake_case = None ,):
UpperCAmelCase_ : Any = super().generate_dummy_inputs(
preprocessor=_snake_case ,batch_size=_snake_case ,seq_length=_snake_case ,is_pair=_snake_case ,framework=_snake_case )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
UpperCAmelCase_ : Tuple = torch.zeros_like(inputs["input_ids"] )
# make every second token global
UpperCAmelCase_ : List[str] = 1
return inputs
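# Note on the dummy inputs above: the ONNX export requires an explicit
# global_attention_mask; the reference implementation marks every second
# token as global via `inputs["global_attention_mask"][:, ::2] = 1` so both
# local and global attention paths are traced.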
| 71 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
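# The _LazyModule indirection above defers the heavy torch/tf/flax imports
# until an attribute is first accessed, which keeps `import transformers`
# cheap when optional backends are not installed.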
| 28 | 0 |
'''simple docstring'''
import os
from pathlib import Path
def UpperCamelCase ( ) -> Any:
'''simple docstring'''
from torch.utils.cpp_extension import load
lowercase =Path(lowercase_ ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
lowercase =[
root / filename
for filename in [
'''vision.cpp''',
os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
]
]
load(
'''MultiScaleDeformableAttention''' , lowercase_ , with_cuda=lowercase_ , extra_include_paths=[str(lowercase_ )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
'''-DCUDA_HAS_FP16=1''',
'''-D__CUDA_NO_HALF_OPERATORS__''',
'''-D__CUDA_NO_HALF_CONVERSIONS__''',
'''-D__CUDA_NO_HALF2_OPERATORS__''',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
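# The loader above JIT-compiles the deformable-attention C++/CUDA sources via
# torch.utils.cpp_extension.load, so a working CUDA toolchain is required at
# runtime; MultiScaleDeformableAttention is imported only after the build
# succeeds.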
| 72 |
'''simple docstring'''
def lowercase__( __UpperCamelCase: int ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
raise TypeError('Input value must be an \'int\' type' )
SCREAMING_SNAKE_CASE : int = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
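# Example: 19 is 0b10011, so the loop shifts five times and returns 5; for
# any positive integer this matches int.bit_length().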
| 28 | 0 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=1e-12):
SCREAMING_SNAKE_CASE = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_UpperCAmelCase , axis=1) , a_min=_UpperCAmelCase)).T
SCREAMING_SNAKE_CASE = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_UpperCAmelCase , axis=1) , a_min=_UpperCAmelCase)).T
return jnp.matmul(_UpperCAmelCase , norm_emb_a.T)
class _snake_case ( nn.Module ):
_lowercase : CLIPConfig
_lowercase : jnp.dtype = jnp.float32
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = FlaxCLIPVisionModule(self.config.vision_config)
SCREAMING_SNAKE_CASE = nn.Dense(self.config.projection_dim , use_bias=a , dtype=self.dtype)
SCREAMING_SNAKE_CASE = self.param('concept_embeds' , jax.nn.initializers.ones , (17, self.config.projection_dim))
SCREAMING_SNAKE_CASE = self.param(
'special_care_embeds' , jax.nn.initializers.ones , (3, self.config.projection_dim))
SCREAMING_SNAKE_CASE = self.param('concept_embeds_weights' , jax.nn.initializers.ones , (17,))
SCREAMING_SNAKE_CASE = self.param('special_care_embeds_weights' , jax.nn.initializers.ones , (3,))
def __call__( self , a) -> Tuple:
SCREAMING_SNAKE_CASE = self.vision_model(a)[1]
SCREAMING_SNAKE_CASE = self.visual_projection(a)
SCREAMING_SNAKE_CASE = jax_cosine_distance(a , self.special_care_embeds)
SCREAMING_SNAKE_CASE = jax_cosine_distance(a , self.concept_embeds)
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
SCREAMING_SNAKE_CASE = jnp.round(a , 3)
SCREAMING_SNAKE_CASE = jnp.any(special_scores > 0 , axis=1 , keepdims=a)
# Use a lower threshold if an image has any special care concept
SCREAMING_SNAKE_CASE = is_special_care * 0.01
SCREAMING_SNAKE_CASE = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
SCREAMING_SNAKE_CASE = jnp.round(a , 3)
SCREAMING_SNAKE_CASE = jnp.any(concept_scores > 0 , axis=1)
return has_nsfw_concepts
class _snake_case ( A__ ):
_lowercase : Optional[Any] = CLIPConfig
_lowercase : Optional[Any] = '''clip_input'''
_lowercase : Dict = FlaxStableDiffusionSafetyCheckerModule
def __init__( self , a , a = None , a = 0 , a = jnp.float32 , a = True , **a , ) -> Dict:
if input_shape is None:
SCREAMING_SNAKE_CASE = (1, 224, 224, 3)
SCREAMING_SNAKE_CASE = self.module_class(config=a , dtype=a , **a)
super().__init__(a , a , input_shape=a , seed=a , dtype=a , _do_init=_do_init)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a = None) -> FrozenDict:
# init input tensor
SCREAMING_SNAKE_CASE = jax.random.normal(a , a)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = jax.random.split(a)
SCREAMING_SNAKE_CASE = {'params': params_rng, 'dropout': dropout_rng}
SCREAMING_SNAKE_CASE = self.module.init(a , a)['params']
return random_params
def __call__( self , a , a = None , ) -> Optional[int]:
SCREAMING_SNAKE_CASE = jnp.transpose(a , (0, 2, 3, 1))
return self.module.apply(
{'params': params or self.params} , jnp.array(a , dtype=jnp.float32) , rngs={} , )
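# Usage sketch (names assumed from the diffusers reference): the wrapper is
# loaded with FlaxStableDiffusionSafetyChecker.from_pretrained(...) and called
# on a batch of NCHW float images; it returns a boolean array flagging images
# whose CLIP embedding matches any NSFW concept.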
| 73 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self, A=None, A=None, A=None, **A ):
'''simple docstring'''
if tokenize_kwargs is None:
SCREAMING_SNAKE_CASE : Optional[int] = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
SCREAMING_SNAKE_CASE : Tuple = truncation
SCREAMING_SNAKE_CASE : int = tokenize_kwargs
SCREAMING_SNAKE_CASE : Optional[Any] = {}
if return_tensors is not None:
SCREAMING_SNAKE_CASE : Optional[int] = return_tensors
return preprocess_params, {}, postprocess_params
def UpperCamelCase_ ( self, A, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.framework
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(A, return_tensors=A, **A )
return model_inputs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model(**A )
return model_outputs
def UpperCamelCase_ ( self, A, A=False ):
'''simple docstring'''
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self, *A, **A ):
'''simple docstring'''
return super().__call__(*A, **A )
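# Usage sketch (standard transformers API; model name illustrative):
#     extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#     feats = extractor("hello world")  # nested list: [batch][tokens][hidden_size]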
| 28 | 0 |
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def a__ ( snake_case = 100 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = 1
__SCREAMING_SNAKE_CASE : int = 2
for i in range(2 , max_n + 1 ):
__SCREAMING_SNAKE_CASE : Optional[Any] = pre_numerator
__SCREAMING_SNAKE_CASE : Optional[int] = 2 * i // 3 if i % 3 == 0 else 1
__SCREAMING_SNAKE_CASE : Dict = cur_numerator
__SCREAMING_SNAKE_CASE : Dict = e_cont * pre_numerator + temp
return sum_digits(snake_case )
if __name__ == "__main__":
print(f'''{solution() = }''')
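# A hedged sanity check (a worked number, not runnable as-is: the print above
# refers to `solution` while both helpers are defined as `a__`): Project Euler
# problem 65 gives the 10th convergent of e as 1457/536, so the digit sum of its
# numerator is 1 + 4 + 5 + 7 = 17, i.e. a correctly named solution(10) returns 17.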
| 74 |
'''simple docstring'''
from __future__ import annotations
import queue
class _a :
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = data
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
def lowercase__( ):
"""simple docstring"""
print('\n********Press N to stop entering at any point of time********\n' )
SCREAMING_SNAKE_CASE : str = input('Enter the value of the root node: ' ).strip().lower()
SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue()
SCREAMING_SNAKE_CASE : Dict = TreeNode(int(__UpperCamelCase ) )
q.put(__UpperCamelCase )
while not q.empty():
SCREAMING_SNAKE_CASE : List[Any] = q.get()
SCREAMING_SNAKE_CASE : Optional[int] = f"Enter the left node of {node_found.data}: "
SCREAMING_SNAKE_CASE : Any = input(__UpperCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
SCREAMING_SNAKE_CASE : str = TreeNode(int(__UpperCamelCase ) )
SCREAMING_SNAKE_CASE : Any = left_node
q.put(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = f"Enter the right node of {node_found.data}: "
SCREAMING_SNAKE_CASE : Dict = input(__UpperCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
SCREAMING_SNAKE_CASE : Optional[int] = TreeNode(int(__UpperCamelCase ) )
SCREAMING_SNAKE_CASE : Any = right_node
q.put(__UpperCamelCase )
raise  # unreachable: every dequeued node either returns early or enqueues both children
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
print(node.data ,end=',' )
pre_order(node.left )
pre_order(node.right )
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
in_order(node.left )
print(node.data ,end=',' )
in_order(node.right )
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data ,end=',' )
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue()
q.put(__UpperCamelCase )
while not q.empty():
SCREAMING_SNAKE_CASE : Optional[int] = q.get()
print(node_dequeued.data ,end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue()
q.put(__UpperCamelCase )
while not q.empty():
SCREAMING_SNAKE_CASE : Union[str, Any] = []
while not q.empty():
SCREAMING_SNAKE_CASE : List[Any] = q.get()
print(node_dequeued.data ,end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(__UpperCamelCase )
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
SCREAMING_SNAKE_CASE : list[TreeNode] = []
SCREAMING_SNAKE_CASE : Optional[Any] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data ,end=',' )
stack.append(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Any = n.left
# end of while means current node doesn't have left child
SCREAMING_SNAKE_CASE : List[Any] = stack.pop()
# start to traverse its right child
SCREAMING_SNAKE_CASE : Any = n.right
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
SCREAMING_SNAKE_CASE : list[TreeNode] = []
SCREAMING_SNAKE_CASE : int = node
while n or stack:
while n:
stack.append(__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = n.left
SCREAMING_SNAKE_CASE : Tuple = stack.pop()
print(n.data ,end=',' )
SCREAMING_SNAKE_CASE : str = n.right
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = [], []
SCREAMING_SNAKE_CASE : Optional[int] = node
stacka.append(__UpperCamelCase )
while stacka: # to find the reversed order of post order, store it in stack2
SCREAMING_SNAKE_CASE : Optional[int] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(__UpperCamelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data ,end=',' )
def lowercase__( __UpperCamelCase: str = "" ,__UpperCamelCase: Dict=50 ,__UpperCamelCase: Optional[int]="*" ):
"""simple docstring"""
if not s:
return "\n" + width * char
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = divmod(width - len(__UpperCamelCase ) - 2 ,2 )
return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
UpperCamelCase_ = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
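# A minimal non-interactive sketch (assumed usage: `build_tree` above reads from
# stdin, so this builds a small fixed tree by hand and runs the traversals on it;
# `TreeNode` is the name the traversal functions themselves reference):
#
#     root = TreeNode(1)
#     root.left, root.right = TreeNode(2), TreeNode(3)
#     root.left.left, root.left.right = TreeNode(4), TreeNode(5)
#     pre_order(root)    # 1,2,4,5,3,
#     in_order(root)     # 4,2,5,1,3,
#     post_order(root)   # 4,5,2,3,1,
#     level_order(root)  # 1,2,3,4,5,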
| 28 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_ :
def __init__( self : Tuple , _A : Any , _A : List[str]=13 , _A : Optional[int]=[30, 30] , _A : List[str]=2 , _A : Union[str, Any]=3 , _A : Union[str, Any]=True , _A : Optional[Any]=True , _A : Tuple=32 , _A : Optional[Any]=5 , _A : List[Any]=4 , _A : Any=37 , _A : List[str]="gelu" , _A : Tuple=0.1 , _A : str=0.1 , _A : Tuple=10 , _A : List[Any]=0.0_2 , _A : Any=3 , _A : Optional[int]=None , _A : Tuple=8 , _A : Optional[Any]=10 , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : Dict = batch_size
UpperCAmelCase__ : str = image_size
UpperCAmelCase__ : List[Any] = patch_size
UpperCAmelCase__ : str = num_channels
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : Optional[int] = use_labels
UpperCAmelCase__ : str = hidden_size
UpperCAmelCase__ : Optional[Any] = num_hidden_layers
UpperCAmelCase__ : Dict = num_attention_heads
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : List[str] = hidden_act
UpperCAmelCase__ : str = hidden_dropout_prob
UpperCAmelCase__ : List[str] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : str = num_labels
UpperCAmelCase__ : List[str] = scope
UpperCAmelCase__ : Union[str, Any] = n_targets
UpperCAmelCase__ : int = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
UpperCAmelCase__ : Any = (image_size[1] // patch_size) * (image_size[0] // patch_size)
UpperCAmelCase__ : Tuple = num_patches + 1 + self.num_detection_tokens
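# e.g. with the defaults above (image_size=[30, 30], patch_size=2,
# num_detection_tokens=10): (30 // 2) * (30 // 2) = 225 patches,
# plus 1 [CLS] token and 10 detection tokens -> expected_seq_len = 236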
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
UpperCAmelCase__ : Optional[int] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
UpperCAmelCase__ : List[Any] = []
for i in range(self.batch_size ):
UpperCAmelCase__ : str = {}
UpperCAmelCase__ : Optional[Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_A )
UpperCAmelCase__ : Union[str, Any] = torch.rand(self.n_targets , 4 , device=_A )
labels.append(_A )
UpperCAmelCase__ : List[Any] = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self : Tuple ):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def lowercase_ ( self : Union[str, Any] , _A : int , _A : List[str] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = YolosModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : List[Any] = model(_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : Dict , _A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = YolosForObjectDetection(_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : Optional[Any] = model(pixel_values=_A )
UpperCAmelCase__ : Dict = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
UpperCAmelCase__ : Dict = model(pixel_values=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCAmelCase__ = (
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : Any=False ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
UpperCAmelCase__ : int = []
for i in range(self.model_tester.batch_size ):
UpperCAmelCase__ : str = {}
UpperCAmelCase__ : str = torch.ones(
size=(self.model_tester.n_targets,) , device=_A , dtype=torch.long )
UpperCAmelCase__ : str = torch.ones(
self.model_tester.n_targets , 4 , device=_A , dtype=torch.float )
labels.append(_A )
UpperCAmelCase__ : str = labels
return inputs_dict
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = YolosModelTester(self )
UpperCAmelCase__ : Optional[int] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear ) )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(_A )
UpperCAmelCase__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : List[str] = [*signature.parameters.keys()]
UpperCAmelCase__ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : List[Any] = True
# in YOLOS, the seq_len is different
UpperCAmelCase__ : Union[str, Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : Union[str, Any] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase__ : Any = outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : int = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : Any = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase__ : str = outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
UpperCAmelCase__ : str = len(_A )
# Check attention is always last and order is fine
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : Optional[int] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase__ : Any = 1
self.assertEqual(out_len + added_hidden_states , len(_A ) )
UpperCAmelCase__ : List[str] = outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
def check_hidden_states_output(_A : Optional[int] , _A : int , _A : List[str] ):
UpperCAmelCase__ : Union[str, Any] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : str = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase__ : Optional[int] = outputs.hidden_states
UpperCAmelCase__ : List[str] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ) , _A )
# YOLOS has a different seq_length
UpperCAmelCase__ : Optional[Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : Any = True
check_hidden_states_output(_A , _A , _A )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_A )
@slow
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : str = YolosModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def a__ ( ) -> Dict:
UpperCAmelCase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(_A )
UpperCAmelCase__ : Optional[Any] = self.default_image_processor
UpperCAmelCase__ : Dict = prepare_img()
UpperCAmelCase__ : Dict = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Union[str, Any] = model(inputs.pixel_values )
# verify outputs
UpperCAmelCase__ : Any = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase__ : Dict = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=_A , )
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _A , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _A , atol=1e-4 ) )
# verify postprocessing
UpperCAmelCase__ : Any = image_processor.post_process_object_detection(
_A , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
UpperCAmelCase__ : str = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(_A )
UpperCAmelCase__ : Any = [75, 75, 17, 63, 17]
UpperCAmelCase__ : int = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(_A )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , _A , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , _A )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , _A ) )
| 75 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _a :
'''simple docstring'''
def __init__( self, A = "cpu", A = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = device
SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizerFast.from_pretrained(A )
SCREAMING_SNAKE_CASE : int = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
SCREAMING_SNAKE_CASE : str = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
SCREAMING_SNAKE_CASE : Dict = torchvision.transforms.Normalize(self.image_mean, self.image_std )
SCREAMING_SNAKE_CASE : List[str] = torchvision.transforms.Resize(224 )
SCREAMING_SNAKE_CASE : List[Any] = torchvision.transforms.CenterCrop(224 )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.resize(A )
SCREAMING_SNAKE_CASE : Any = self.center_crop(A )
SCREAMING_SNAKE_CASE : str = self.normalize(A )
return images
def __call__( self, A=None, A=None, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.tokenizer(text=A, **A )
SCREAMING_SNAKE_CASE : Tuple = self.preprocess_img(A )
SCREAMING_SNAKE_CASE : List[str] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _a ( nn.Module ):
'''simple docstring'''
def __init__( self, A=10, A=0.01, A=None, A=None, A=None, A=None, A=None, A=None, A=False, A=True, A="image", A=True, A=False, A=False, A=False, ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : List[Any] = device if device else get_device()
if vqgan:
SCREAMING_SNAKE_CASE : Optional[Any] = vqgan
else:
SCREAMING_SNAKE_CASE : Tuple = load_vqgan(self.device, conf_path=A, ckpt_path=A )
self.vqgan.eval()
if clip:
SCREAMING_SNAKE_CASE : List[str] = clip
else:
SCREAMING_SNAKE_CASE : Any = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
SCREAMING_SNAKE_CASE : Optional[int] = ProcessorGradientFlow(device=self.device )
SCREAMING_SNAKE_CASE : Optional[int] = iterations
SCREAMING_SNAKE_CASE : Tuple = lr
SCREAMING_SNAKE_CASE : Tuple = log
SCREAMING_SNAKE_CASE : str = make_grid
SCREAMING_SNAKE_CASE : Dict = return_val
SCREAMING_SNAKE_CASE : Union[str, Any] = quantize
SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self, A=None, A=None, A=5, A=True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = []
if output_path is None:
SCREAMING_SNAKE_CASE : int = './animation.gif'
if input_path is None:
SCREAMING_SNAKE_CASE : Optional[int] = self.save_path
SCREAMING_SNAKE_CASE : Optional[Any] = sorted(glob(input_path + '/*' ) )
if not len(A ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(A ) == 1:
print('Only one image found in save path (did you pass save_intermediate=True to the generate function?)' )
SCREAMING_SNAKE_CASE : Optional[Any] = total_duration / len(A )
SCREAMING_SNAKE_CASE : int = [frame_duration] * len(A )
if extend_frames:
SCREAMING_SNAKE_CASE : List[str] = 1.5
SCREAMING_SNAKE_CASE : int = 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(A ) )
imageio.mimsave(A, A, duration=A )
print(F"gif saved to {output_path}" )
def UpperCamelCase_ ( self, A=None, A=None ):
'''simple docstring'''
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
SCREAMING_SNAKE_CASE : str = preprocess(Image.open(A ), target_image_size=256 ).to(self.device )
SCREAMING_SNAKE_CASE : Any = preprocess_vqgan(A )
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : Tuple = self.vqgan.encode(A )
return z
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.latent.detach().requires_grad_()
SCREAMING_SNAKE_CASE : Union[str, Any] = base_latent + transform_vector
if self.quantize:
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.quantize(A )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = trans_latent
return self.vqgan.decode(A )
def UpperCamelCase_ ( self, A, A, A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.clip_preprocessor(text=A, images=A, return_tensors='pt', padding=A )
SCREAMING_SNAKE_CASE : str = self.clip(**A )
SCREAMING_SNAKE_CASE : Any = clip_outputs.logits_per_image
if weights is not None:
SCREAMING_SNAKE_CASE : List[Any] = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_clip_similarity(pos_prompts['prompts'], A, weights=(1 / pos_prompts['weights']) )
if neg_prompts:
SCREAMING_SNAKE_CASE : List[Any] = self._get_clip_similarity(neg_prompts['prompts'], A, weights=neg_prompts['weights'] )
else:
SCREAMING_SNAKE_CASE : str = torch.tensor([1], device=self.device )
SCREAMING_SNAKE_CASE : List[Any] = -torch.log(A ) + torch.log(A )
return loss
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = torch.randn_like(self.latent, requires_grad=A, device=self.device )
SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
SCREAMING_SNAKE_CASE : Union[str, Any] = self._add_vector(A )
SCREAMING_SNAKE_CASE : Dict = loop_post_process(A )
SCREAMING_SNAKE_CASE : List[str] = self._get_CLIP_loss(A, A, A )
print('CLIP loss', A )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=A )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
wandb.init(reinit=A, project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
SCREAMING_SNAKE_CASE : Tuple = Image.open(A )
SCREAMING_SNAKE_CASE : int = image.resize((256, 256) )
wandb.log({'Original Image': wandb.Image(A )} )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not prompts:
return []
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Dict = []
if isinstance(A, A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(A, (tuple, list) ):
SCREAMING_SNAKE_CASE : List[str] = prompt[0]
SCREAMING_SNAKE_CASE : Any = float(prompt[1] )
elif ":" in prompt:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = prompt.split(':' )
SCREAMING_SNAKE_CASE : Any = float(A )
else:
SCREAMING_SNAKE_CASE : Dict = prompt
SCREAMING_SNAKE_CASE : List[Any] = 1.0
processed_prompts.append(A )
weights.append(A )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A, device=self.device ),
}
def UpperCamelCase_ ( self, A, A=None, A=None, A=True, A=False, A=True, A=True, A=None, ):
'''simple docstring'''
if image_path:
SCREAMING_SNAKE_CASE : int = self._get_latent(A )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(A, A, A )
assert pos_prompts, "You must provide at least one positive prompt."
SCREAMING_SNAKE_CASE : Dict = self.process_prompts(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.process_prompts(A )
if save_final and save_path is None:
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(A ):
os.makedirs(A )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = save_path + '_' + get_timestamp()
os.makedirs(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = save_path
SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(A ) )
SCREAMING_SNAKE_CASE : int = loop_post_process(A )
for iter, transformed_img in enumerate(self._optimize_CLIP(A, A, A ) ):
if show_intermediate:
show_pil(A )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({'Image': wandb.Image(A )} )
if show_final:
show_pil(A )
if save_final:
transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png" ) )
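# A minimal sketch of the prompt syntax that `process_prompts` above accepts
# (illustrative prompts; behavior read directly off the parsing branches):
#
#     "a portrait:1.0|soft lighting:0.5"      -> prompts ["a portrait", "soft lighting"],
#                                                weights tensor([1.0, 0.5])
#     ["a portrait", ("soft lighting", 0.5)]  -> tuples carry explicit weights;
#                                                bare strings default to weight 1.0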
| 28 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =["pixel_values"]
def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = 1 / 2_55 , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = True , **UpperCamelCase_ , ) -> None:
super().__init__(**UpperCamelCase_ )
__lowercase : str = size if size is not None else {'''shortest_edge''': 2_24}
__lowercase : Tuple = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
__lowercase : int = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
__lowercase : Optional[int] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ , param_name='''crop_size''' )
__lowercase : int = do_resize
__lowercase : Any = size
__lowercase : int = resample
__lowercase : List[Any] = do_center_crop
__lowercase : Dict = crop_size
__lowercase : Optional[int] = do_rescale
__lowercase : str = rescale_factor
__lowercase : int = do_normalize
__lowercase : str = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowercase : List[str] = image_std if image_std is not None else OPENAI_CLIP_STD
__lowercase : Optional[int] = do_convert_rgb
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray:
__lowercase : List[str] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__lowercase : Union[str, Any] = get_resize_output_image_size(UpperCamelCase_ , size=size['''shortest_edge'''] , default_to_square=UpperCamelCase_ )
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray:
__lowercase : Union[str, Any] = get_size_dict(UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(UpperCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> Union[str, Any]:
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray:
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ) -> PIL.Image.Image:
__lowercase : Any = do_resize if do_resize is not None else self.do_resize
__lowercase : int = size if size is not None else self.size
__lowercase : Dict = get_size_dict(UpperCamelCase_ , param_name='''size''' , default_to_square=UpperCamelCase_ )
__lowercase : Dict = resample if resample is not None else self.resample
__lowercase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase : str = crop_size if crop_size is not None else self.crop_size
__lowercase : List[str] = get_size_dict(UpperCamelCase_ , param_name='''crop_size''' , default_to_square=UpperCamelCase_ )
__lowercase : Tuple = do_rescale if do_rescale is not None else self.do_rescale
__lowercase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
__lowercase : Any = image_mean if image_mean is not None else self.image_mean
__lowercase : Dict = image_std if image_std is not None else self.image_std
__lowercase : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase : int = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase : List[Any] = [convert_to_rgb(UpperCamelCase_ ) for image in images]
# All transformations expect numpy arrays.
__lowercase : str = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_resize:
__lowercase : str = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_center_crop:
__lowercase : List[str] = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images]
if do_rescale:
__lowercase : List[Any] = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_normalize:
__lowercase : Union[str, Any] = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images]
__lowercase : Optional[Any] = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
__lowercase : List[Any] = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
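# A hedged usage sketch (file name and shapes are illustrative; the class above is
# instantiated with its defaults, and the pipeline order is convert-RGB -> resize
# (shortest edge 224) -> center-crop (224x224) -> rescale (1/255) -> normalize ->
# channel-first):
#
#     from PIL import Image
#     processor = UpperCAmelCase_()
#     batch = processor(images=Image.open("cat.png"), return_tensors="np")
#     batch["pixel_values"].shape   # -> (1, 3, 224, 224)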
| 76 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Dict = nn.ModuleList(A )
def UpperCamelCase_ ( self, A, A, A, A, A, A = None, A = None, A = None, A = None, A = False, A = True, ):
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(A, A, self.nets ) ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = controlnet(
A, A, A, A, A, A, A, A, A, A, A, )
# merge samples
if i == 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = down_samples, mid_sample
else:
SCREAMING_SNAKE_CASE : str = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(A, A )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def UpperCamelCase_ ( self, A, A = True, A = None, A = False, A = None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Optional[int] = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
A, is_main_process=A, save_function=A, safe_serialization=A, variant=A, )
idx += 1
SCREAMING_SNAKE_CASE : List[Any] = model_path_to_save + F"_{idx}"
@classmethod
def UpperCamelCase_ ( cls, A, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : List[Any] = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
SCREAMING_SNAKE_CASE : Optional[Any] = pretrained_model_path
while os.path.isdir(A ):
SCREAMING_SNAKE_CASE : Optional[int] = ControlNetModel.from_pretrained(A, **A )
controlnets.append(A )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + F"_{idx}"
logger.info(F"{len(A )} controlnets loaded from {pretrained_model_path}." )
if len(A ) == 0:
raise ValueError(
F"No ControlNets found under {os.path.dirname(A )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(A )
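# A hedged usage sketch of the directory convention described in the comments above
# (paths are illustrative; the class corresponds to diffusers' MultiControlNetModel,
# shown here for two controlnets, where the save and load suffixes line up as
# `<dir>` and `<dir>_1`):
#
#     multi = MultiControlNetModel([controlnet_a, controlnet_b])
#     multi.save_pretrained("./my_multinet")   # writes ./my_multinet and ./my_multinet_1
#     restored = MultiControlNetModel.from_pretrained("./my_multinet")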
| 28 | 0 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A = """pt"""
elif is_tf_available():
A = """tf"""
else:
A = """jax"""
class a__ ( __magic_name__ , unittest.TestCase ):
lowercase_ = ByTaTokenizer
lowercase_ = False
def a_ ( self : Any):
"""simple docstring"""
super().setUp()
__UpperCAmelCase : List[str] = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def a_ ( self : List[str]):
"""simple docstring"""
return ByTaTokenizer.from_pretrained("google/byt5-small")
def a_ ( self : Any , **UpperCamelCase_ : str):
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_)
def a_ ( self : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Dict=20 , UpperCamelCase_ : Dict=5):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = []
for i in range(len(UpperCamelCase_)):
try:
__UpperCAmelCase : Optional[int] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_)
except UnicodeDecodeError:
pass
toks.append((i, tok))
__UpperCAmelCase : List[str] = list(filter(lambda UpperCamelCase_: re.match(r"^[ a-zA-Z]+$" , t[1]) , UpperCamelCase_))
__UpperCAmelCase : Dict = list(filter(lambda UpperCamelCase_: [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_) , UpperCamelCase_))
if max_length is not None and len(UpperCamelCase_) > max_length:
__UpperCAmelCase : Optional[int] = toks[:max_length]
if min_length is not None and len(UpperCamelCase_) < min_length and len(UpperCamelCase_) > 0:
while len(UpperCamelCase_) < min_length:
__UpperCAmelCase : Dict = toks + toks
# toks_str = [t[1] for t in toks]
__UpperCAmelCase : Optional[int] = [t[0] for t in toks]
# Ensure consistency
__UpperCAmelCase : List[str] = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_)
if " " not in output_txt and len(UpperCamelCase_) > 1:
__UpperCAmelCase : Optional[int] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_)
+ " "
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_)
)
if with_prefix_space:
__UpperCAmelCase : Any = " " + output_txt
__UpperCAmelCase : str = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_)
return output_txt, output_ids
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : Any = self.ta_base_tokenizer
__UpperCAmelCase : Any = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
__UpperCAmelCase : List[str] = tokenizer(["hi", "I went to the gym", ""])
self.assertListEqual(batch_with_eos_added["input_ids"] , batch_without_eos_added["input_ids"])
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase : str = self.ta_base_tokenizer
__UpperCAmelCase : Tuple = "Unicode €."
__UpperCAmelCase : List[str] = tokenizer(UpperCamelCase_)
__UpperCAmelCase : Optional[int] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["input_ids"] , UpperCamelCase_)
# decoding
__UpperCAmelCase : Optional[Any] = tokenizer.decode(UpperCamelCase_)
self.assertEqual(UpperCamelCase_ , "Unicode €.</s>")
__UpperCAmelCase : Any = tokenizer("e è é ê ë")
__UpperCAmelCase : Any = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["input_ids"] , UpperCamelCase_)
# decoding
__UpperCAmelCase : str = tokenizer.decode(UpperCamelCase_)
self.assertEqual(UpperCamelCase_ , "e è é ê ë</s>")
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")) , "e è é ê ë</s>")
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : str = self.ta_base_tokenizer
__UpperCAmelCase : Optional[int] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
__UpperCAmelCase : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
__UpperCAmelCase : Any = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_)
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_)
if FRAMEWORK != "jax":
__UpperCAmelCase : str = list(batch.input_ids.numpy()[0])
else:
__UpperCAmelCase : str = list(batch.input_ids.tolist()[0])
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_)
self.assertEqual((2, 37) , batch.input_ids.shape)
self.assertEqual((2, 37) , batch.attention_mask.shape)
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : Tuple = self.ta_base_tokenizer
__UpperCAmelCase : Dict = ["A long paragraph for summarization.", "Another paragraph for summarization."]
__UpperCAmelCase : Optional[Any] = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_)
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" , UpperCamelCase_)
self.assertIn("attention_mask" , UpperCamelCase_)
self.assertNotIn("decoder_input_ids" , UpperCamelCase_)
self.assertNotIn("decoder_attention_mask" , UpperCamelCase_)
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase : Any = self.ta_base_tokenizer
__UpperCAmelCase : Tuple = [
"Summary of the text.",
"Another summary.",
]
__UpperCAmelCase : Union[str, Any] = tokenizer(
text_target=UpperCamelCase_ , max_length=32 , padding="max_length" , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_)
self.assertEqual(32 , targets["input_ids"].shape[1])
def a_ ( self : Any):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = self.ta_base_tokenizer
__UpperCAmelCase : Any = ["A long paragraph for summarization. </s>"]
__UpperCAmelCase : str = ["Summary of the text. </s>"]
# fmt: off
__UpperCAmelCase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
__UpperCAmelCase : Dict = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
__UpperCAmelCase : List[str] = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_)
self.assertEqual(UpperCamelCase_ , batch["input_ids"][0])
self.assertEqual(UpperCamelCase_ , batch["labels"][0])
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
self.assertNotEqual(tokenizer.model_max_length , 42)
# Now let's start the test
__UpperCAmelCase : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : List[Any] = tempfile.mkdtemp()
__UpperCAmelCase : int = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : str = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_)
tokenizer.save_pretrained(UpperCamelCase_)
__UpperCAmelCase : Tuple = tokenizer.__class__.from_pretrained(UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_)
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_)
shutil.rmtree(UpperCamelCase_)
__UpperCAmelCase : Tuple = self.get_tokenizers(model_max_length=42)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : Union[str, Any] = tempfile.mkdtemp()
__UpperCAmelCase : Any = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"])
__UpperCAmelCase : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token")
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
__UpperCAmelCase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_)
tokenizer.save_pretrained(UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = tokenizer.__class__.from_pretrained(UpperCamelCase_)
__UpperCAmelCase : List[Any] = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_)
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_)
self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length , 42)
__UpperCAmelCase : List[Any] = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43)
self.assertEqual(tokenizer.model_max_length , 43)
shutil.rmtree(UpperCamelCase_)
def a_ ( self : Dict):
"""simple docstring"""
__UpperCAmelCase : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_)
with open(os.path.join(UpperCamelCase_ , "special_tokens_map.json") , encoding="utf-8") as json_file:
__UpperCAmelCase : int = json.load(UpperCamelCase_)
with open(os.path.join(UpperCamelCase_ , "tokenizer_config.json") , encoding="utf-8") as json_file:
__UpperCAmelCase : Dict = json.load(UpperCamelCase_)
__UpperCAmelCase : Dict = [F"<extra_id_{i}>" for i in range(125)]
__UpperCAmelCase : Dict = added_tokens_extra_ids + [
"an_additional_special_token"
]
__UpperCAmelCase : int = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(UpperCamelCase_ , "special_tokens_map.json") , "w" , encoding="utf-8") as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_)
with open(os.path.join(UpperCamelCase_ , "tokenizer_config.json") , "w" , encoding="utf-8") as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__UpperCAmelCase : List[Any] = tokenizer_class.from_pretrained(
UpperCamelCase_ , )
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens)
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__UpperCAmelCase : Any = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=UpperCamelCase_)]
__UpperCAmelCase : Optional[Any] = tokenizer_class.from_pretrained(
UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , )
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens)
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])) , )
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_)
__UpperCAmelCase : Any = tokenizer_class.from_pretrained(UpperCamelCase_)
self.assertTrue(tokenizer.decode([255]) == "")
def a_ ( self : List[str]):
"""simple docstring"""
pass
def a_ ( self : List[Any]):
"""simple docstring"""
pass
def a_ ( self : Any):
"""simple docstring"""
pass
def a_ ( self : Tuple):
"""simple docstring"""
pass
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
__UpperCAmelCase : Optional[Any] = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
__UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_string(UpperCamelCase_)
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_)
def a_ ( self : Dict):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
__UpperCAmelCase : List[Any] = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : str = tokenizer.convert_ids_to_tokens(
UpperCamelCase_ , skip_special_tokens=UpperCamelCase_)
for attr in attributes_list:
setattr(UpperCamelCase_ , attr + "_id" , UpperCamelCase_)
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_) , UpperCamelCase_)
self.assertEqual(getattr(UpperCamelCase_ , attr + "_id") , UpperCamelCase_)
setattr(UpperCamelCase_ , attr + "_id" , UpperCamelCase_)
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_) , UpperCamelCase_)
self.assertEqual(getattr(UpperCamelCase_ , attr + "_id") , UpperCamelCase_)
setattr(UpperCamelCase_ , "additional_special_tokens_ids" , [])
self.assertListEqual(getattr(UpperCamelCase_ , "additional_special_tokens") , [])
self.assertListEqual(getattr(UpperCamelCase_ , "additional_special_tokens_ids") , [])
setattr(UpperCamelCase_ , "additional_special_tokens_ids" , [token_id_to_test_setters])
self.assertListEqual(getattr(UpperCamelCase_ , "additional_special_tokens") , [token_to_test_setters])
self.assertListEqual(getattr(UpperCamelCase_ , "additional_special_tokens_ids") , [token_id_to_test_setters])
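# A short note on the id scheme exercised above (inferred from the expected ids in
# these fixtures): ByT5 ids are raw UTF-8 byte values offset by 3 to make room for
# the special tokens pad=0, eos=1, unk=2. In "Unicode €." the leading "U" is byte
# 85, so its id is 85 + 3 = 88; the space is 32 + 3 = 35; "€" is the three bytes
# 226/130/172, giving 229, 133, 175; and the trailing "." is 46 + 3 = 49.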
| 77 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : str = ['''audio_values''', '''audio_mask''']
def __init__( self, A=2_048, A=1, A=[16, 16], A=128, A=44_100, A=86, A=2_048, A=0.0, **A, ):
'''simple docstring'''
super().__init__(
feature_size=A, sampling_rate=A, padding_value=A, **A, )
SCREAMING_SNAKE_CASE : str = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = feature_size // self.patch_size[1]
SCREAMING_SNAKE_CASE : Dict = n_fft
SCREAMING_SNAKE_CASE : Tuple = sampling_rate // hop_length_to_sampling_rate
SCREAMING_SNAKE_CASE : str = sampling_rate
SCREAMING_SNAKE_CASE : int = padding_value
SCREAMING_SNAKE_CASE : Any = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2, num_mel_filters=A, min_frequency=0.0, max_frequency=2_20_50.0, sampling_rate=A, norm='slaney', mel_scale='slaney', ).T
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = spectrogram(
A, window_function(self.n_fft, 'hann' ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, )
SCREAMING_SNAKE_CASE : Union[str, Any] = log_spec[:, :-1]
SCREAMING_SNAKE_CASE : List[Any] = log_spec - 20.0
SCREAMING_SNAKE_CASE : Optional[Any] = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
return log_spec
def __call__( self, A, A = None, A = True, A = None, A = False, A = False, **A, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
F" with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
SCREAMING_SNAKE_CASE : List[Any] = isinstance(A, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(A, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A, np.ndarray ):
SCREAMING_SNAKE_CASE : Any = np.asarray(A, dtype=np.floataa )
elif isinstance(A, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
SCREAMING_SNAKE_CASE : int = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0], A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(A, dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
SCREAMING_SNAKE_CASE : Tuple = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
SCREAMING_SNAKE_CASE : List[Any] = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
SCREAMING_SNAKE_CASE : Tuple = np.array(A ).astype(np.floataa )
# convert into correct format for padding
SCREAMING_SNAKE_CASE : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
SCREAMING_SNAKE_CASE : Optional[Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
SCREAMING_SNAKE_CASE : Optional[int] = padded_audio_features * self.padding_value
for i in range(len(A ) ):
SCREAMING_SNAKE_CASE : Optional[int] = audio_features[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = feature
# return as BatchFeature
if return_attention_mask:
SCREAMING_SNAKE_CASE : Any = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
SCREAMING_SNAKE_CASE : Dict = {'audio_values': padded_audio_features}
SCREAMING_SNAKE_CASE : str = BatchFeature(data=A, tensor_type=A )
return encoded_inputs
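# Worked numbers for the mask construction above (a sketch using the defaults:
# feature_size=128 and patch_size=[16, 16], so freq_len = 128 // 16 = 8): a log-mel
# spectrogram with 100 frames yields ceil(100 / 16) = 7 time patches, i.e.
# 7 * 8 = 56 valid patches marked 1 in `audio_mask`, with the remainder up to
# max_patch_len padded with 0.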
| 28 | 0 |
"""Project Euler problem 1: sum of all the multiples of 3 or 5 below a limit."""


def solution(limit: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below ``limit``."""
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(f"{solution() = }")
| 78 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    """Check that Prim's algorithm recovers the expected MST edges."""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
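# The imported `prisms_algorithm` is not shown in this excerpt. As a hedged sketch, a
# heap-based Prim's over the same node -> [[neighbor, cost], ...] mapping could look like
# this; the (u, v) return shape is an assumption chosen to satisfy the assertions above.
import heapq


def prim_mst_sketch(adjacency):
    start = next(iter(adjacency))
    visited = {start}
    heap = [(cost, start, neighbor) for neighbor, cost in adjacency[start]]
    heapq.heapify(heap)
    tree = set()
    while heap and len(visited) < len(adjacency):
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue  # a cheaper edge already reached v
        visited.add(v)
        tree.add((u, v))
        for neighbor, edge_cost in adjacency[v]:
            if neighbor not in visited:
                heapq.heappush(heap, (edge_cost, v, neighbor))
    return tree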
| 28 | 0 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowerCamelCase , unittest.TestCase ):
__lowerCamelCase = None
__lowerCamelCase = BloomTokenizerFast
__lowerCamelCase = BloomTokenizerFast
__lowerCamelCase = True
__lowerCamelCase = False
__lowerCamelCase = 'tokenizer_file'
__lowerCamelCase = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def __UpperCAmelCase ( self ):
super().setUp()
UpperCAmelCase__ : Optional[int] = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self , **_lowerCAmelCase ):
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Optional[int] = self.get_rust_tokenizer()
UpperCAmelCase__ : str = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
UpperCAmelCase__ : int = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
UpperCAmelCase__ : Optional[Any] = tokenizer.batch_encode_plus(_lowerCAmelCase )["""input_ids"""]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase__ : str = tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def __UpperCAmelCase ( self , _lowerCAmelCase=6 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase__ : List[str] = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
UpperCAmelCase__ : Tuple = """This is a simple input"""
UpperCAmelCase__ : Tuple = ["""This is a simple input 1""", """This is a simple input 2"""]
UpperCAmelCase__ : List[str] = ("""This is a simple input""", """This is a pair""")
UpperCAmelCase__ : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
try:
tokenizer_r.encode(_lowerCAmelCase , max_length=_lowerCAmelCase )
tokenizer_r.encode_plus(_lowerCAmelCase , max_length=_lowerCAmelCase )
tokenizer_r.batch_encode_plus(_lowerCAmelCase , max_length=_lowerCAmelCase )
tokenizer_r.encode(_lowerCAmelCase , max_length=_lowerCAmelCase )
tokenizer_r.batch_encode_plus(_lowerCAmelCase , max_length=_lowerCAmelCase )
except ValueError:
self.fail("""Bloom Tokenizer should be able to deal with padding""" )
UpperCAmelCase__ : str = None # Hotfixing padding = None
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" )
# Simple input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" )
# Simple input
self.assertRaises(
_lowerCAmelCase , tokenizer_r.batch_encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" , )
# Pair input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" )
# Pair input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" )
# Pair input
self.assertRaises(
_lowerCAmelCase , tokenizer_r.batch_encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" , )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Optional[int] = self.get_rust_tokenizer()
UpperCAmelCase__ : Optional[int] = load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=_lowerCAmelCase )
UpperCAmelCase__ : int = next(iter(_lowerCAmelCase ) )["""premise"""] # pick up one data
UpperCAmelCase__ : Dict = list(sample_data.values() )
UpperCAmelCase__ : List[Any] = list(map(tokenizer.encode , _lowerCAmelCase ) )
UpperCAmelCase__ : Any = [tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase ) for x in output_tokens]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def __UpperCAmelCase ( self ):
# The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do
# not impose any sequence-length constraint. The parent-class test would fail since it
# relies on the maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
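# For reference, the round trip the first test asserts, as a standalone sketch (assumes
# the "bigscience/tokenizer" checkpoint is cached or downloadable):
from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
ids = tok.batch_encode_plus(["The quick brown fox</s>"])["input_ids"][0]
assert tok.decode(ids) == "The quick brown fox</s>"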
| 79 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : int = StableDiffusionDiffEditPipeline
A : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
A : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
A : str = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A : Union[str, Any] = frozenset([] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=A, )
SCREAMING_SNAKE_CASE : int = DDIMScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, )
SCREAMING_SNAKE_CASE : str = DDIMInverseScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_zero=A, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, )
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE : int = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 16, 16), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(A ) ).to(A )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0, 2, 3, 1 )[0]
SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : int = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Dict = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0, 2, 3, 1 )[0]
SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uinta(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Any = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
if not hasattr(self.pipeline_class, '_optional_components' ):
return
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(A, A, A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Dict = pipe(**A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(A )
SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(A )
pipe_loaded.to(A )
pipe_loaded.set_progress_bar_config(disable=A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(A, A ) is None, F"`{optional_component}` did not stay set to None after loading.", )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**A )[0]
SCREAMING_SNAKE_CASE : List[str] = np.abs(output - output_loaded ).max()
self.assertLess(A, 1E-4 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 'cpu'
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = self.get_dummy_mask_inputs(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.generate_mask(**A )
SCREAMING_SNAKE_CASE : Dict = mask[0, -3:, -3:]
self.assertEqual(mask.shape, (1, 16, 16) )
SCREAMING_SNAKE_CASE : Any = np.array([0] * 9 )
SCREAMING_SNAKE_CASE : Any = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
self.assertEqual(mask[0, -3, -4], 0 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], )
SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler(**A )
SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepInverseScheduler(**A )
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : List[str] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], )
SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
@require_torch_gpu
@slow
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
SCREAMING_SNAKE_CASE : Optional[int] = raw_image.convert('RGB' ).resize((768, 768) )
SCREAMING_SNAKE_CASE : List[str] = raw_image
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : int = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[Any] = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : List[str] = 'a bowl of pears'
SCREAMING_SNAKE_CASE : Dict = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A ).latents
SCREAMING_SNAKE_CASE : List[str] = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : List[Any] = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : Tuple = 'a bowl of pears'
SCREAMING_SNAKE_CASE : List[Any] = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A, num_inference_steps=25, ).latents
SCREAMING_SNAKE_CASE : str = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : Tuple = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
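# Both slow tests exercise the same three-stage DiffEdit flow; condensed (argument values
# taken from the tests, pipeline setup omitted, `pipe`/`img` assumed in scope):
# mask = pipe.generate_mask(image=img, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
# inv_latents = pipe.invert(prompt="a bowl of fruit", image=img, inpaint_strength=0.7).latents
# edited = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=inv_latents, inpaint_strength=0.7).images[0]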
| 28 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph over ``size`` vertices whose edge weights are restricted to 0 or 1."""

    def __init__(self, size: int) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: a deque replaces the priority queue because weights are 0 or 1."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight edges to the
                # back, so vertices are still popped in nondecreasing distance order.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
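# A short usage example of the 0-1 BFS above: with only 0/1 edge weights, a deque
# suffices where Dijkstra would need a priority queue.
graph = AdjacencyList(4)
graph.add_edge(0, 1, 0)
graph.add_edge(1, 2, 1)
graph.add_edge(0, 3, 1)
graph.add_edge(3, 2, 0)
assert graph.get_shortest_path(0, 2) == 1  # e.g. 0 -> 1 (cost 0) -> 2 (cost 1)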
| 80 |
"""Project Euler problem 72: count reduced proper fractions n/d with d <= limit."""


def solution(limit: int = 1_000_000) -> int:
    """Return the sum of Euler's totient phi(d) over 2 <= d <= limit."""
    phi = [i - 1 for i in range(limit + 1)]  # phi[p] == p - 1 exactly when p is prime
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: sieve the factor i out of its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
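# An O(limit^2) cross-check of the sieve above via math.gcd (added illustration):
from math import gcd


def solution_brute(limit: int = 8) -> int:
    return sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)


assert solution_brute(8) == solution(8) == 21  # the Project Euler 72 example value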
| 28 | 0 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class a (unittest.TestCase ):
"""simple docstring"""
@slow
def __snake_case ( self : Optional[Any] ) -> List[Any]:
__snake_case : List[str] = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
__snake_case : Tuple = AutoTokenizer.from_pretrained("google/mt5-small" )
__snake_case : Optional[Any] = tokenizer("Hello there" , return_tensors="np" ).input_ids
__snake_case : List[Any] = tokenizer("Hi I am" , return_tensors="np" ).input_ids
__snake_case : Union[str, Any] = shift_tokens_right(lowerCamelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
__snake_case : List[str] = model(lowerCamelCase , decoder_input_ids=lowerCamelCase ).logits
__snake_case : List[str] = optax.softmax_cross_entropy(lowerCamelCase , onehot(lowerCamelCase , logits.shape[-1] ) ).mean()
__snake_case : Optional[int] = -(labels.shape[-1] * loss.item())
__snake_case : List[Any] = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
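# Why `-(labels.shape[-1] * loss.item())` above: with a batch of one, the mean per-token
# cross-entropy times the target length is the sequence's total negative log-likelihood,
# so negating it yields the log-probability score compared against EXPECTED_SCORE.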
| 81 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : str = LongformerTokenizer
A : List[str] = True
A : Optional[int] = LongformerTokenizerFast
A : Tuple = True
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : Any = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(A, range(len(A ) ) ) )
SCREAMING_SNAKE_CASE : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(A ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(A ) )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'lower newer'
SCREAMING_SNAKE_CASE : Union[str, Any] = 'lower newer'
return input_text, output_text
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map )
SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer'
SCREAMING_SNAKE_CASE : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A ) # , add_prefix_space=True)
self.assertListEqual(A, A )
SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A )
SCREAMING_SNAKE_CASE : int = tokenizer.encode(
'sequence builders', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(
'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.'
SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A, A )
SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A, A )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A, A )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE : Optional[int] = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A )
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence'
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence'
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Tuple = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A, A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), )
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while the Python tokenizer doesn't
self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ):
SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['trim_offsets'], A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}"
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
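# What the offset assertions boil down to for text = "hello hello" (second token "Ġhello"):
# with offset trimming the span excludes the leading encoded space, without it the span
# includes it, i.e. offset_mapping[1] is (6, 11) in the trimmed case and (5, 11) otherwise.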
| 28 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Any , _UpperCAmelCase : str , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Tuple=3 , _UpperCAmelCase : Dict=18 , _UpperCAmelCase : int=30 , _UpperCAmelCase : List[str]=400 , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Any=None , _UpperCAmelCase : int=True , _UpperCAmelCase : Any=[0.5, 0.5, 0.5] , _UpperCAmelCase : int=[0.5, 0.5, 0.5] , ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = size if size is not None else {"height": 18, "width": 18}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = DPTImageProcessor if is_vision_available() else None
def lowercase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = DPTImageProcessingTester(self )
@property
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : List[str] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "size" ) )
def lowercase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowercase__ ( self : str ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
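# A minimal standalone sketch of the preprocessing path these tests cover; the size value
# mirrors the tester defaults above.
from PIL import Image
import numpy as np
from transformers import DPTImageProcessor

processor = DPTImageProcessor(size={"height": 18, "width": 18})
image = Image.fromarray(np.zeros((30, 30, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
assert pixel_values.shape == (1, 3, 18, 18)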
| 82 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Union[str, Any] = StableDiffusionXLImgaImgPipeline
A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
A : str = PipelineTesterMixin.required_optional_params - {'''latents'''}
A : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
A : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=A, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
SCREAMING_SNAKE_CASE : str = EulerDiscreteScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading', )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=32, )
SCREAMING_SNAKE_CASE : int = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A )
SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModelWithProjection(A )
SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A )
SCREAMING_SNAKE_CASE : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : str = image / 2 + 0.5
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionXLImgaImgPipeline(**A )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Any = sd_pipe(**A ).images
SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionXLImgaImgPipeline(**A )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(A )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
# forward without prompt embeds
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt
SCREAMING_SNAKE_CASE : Optional[int] = 3 * [inputs['prompt']]
SCREAMING_SNAKE_CASE : int = sd_pipe(**A )
SCREAMING_SNAKE_CASE : List[Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : str = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : int = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = sd_pipe.encode_prompt(A, negative_prompt=A )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(
**A, prompt_embeds=A, negative_prompt_embeds=A, pooled_prompt_embeds=A, negative_pooled_prompt_embeds=A, )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
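# Note: `encode_prompt` returns (prompt_embeds, negative_prompt_embeds,
# pooled_prompt_embeds, negative_pooled_prompt_embeds); feeding those four tensors back
# through the pipeline must reproduce the plain string-prompt output to within 1e-4.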
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self, A, A="cpu", A=torch.floataa, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Optional[Any] = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE : str = torch.from_numpy(A ).to(device=A, dtype=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs(A )
SCREAMING_SNAKE_CASE : str = pipe(**A ).images
SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Dict = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 28 | 0 |
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
lowerCAmelCase__ = get_logger(__name__)
class __snake_case :
def __init__( self : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any]=None ):
"""simple docstring"""
_lowerCamelCase : List[str] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('''__''' ):
setattr(self , __lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
_lowerCamelCase : List[str] = module._original_module if isinstance(__lowerCAmelCase , _PatchedModuleObj ) else module
class __snake_case :
snake_case__ : Optional[Any] = []
def __init__( self : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int]=None ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = obj
_lowerCamelCase : Tuple = target
_lowerCamelCase : Optional[Any] = new
_lowerCamelCase : Optional[Any] = target.split('''.''' )[0]
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Optional[Any] = attrs or []
def __enter__( self : Optional[Any] ):
"""simple docstring"""
*_lowerCamelCase , _lowerCamelCase : str = self.target.split('''.''' )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(__lowerCAmelCase ) ):
try:
_lowerCamelCase : Tuple = import_module('''.'''.join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
_lowerCamelCase : Optional[int] = getattr(self.obj , __lowerCAmelCase )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(__lowerCAmelCase , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
_lowerCamelCase : Optional[Any] = obj_attr
# patch at top level
setattr(self.obj , __lowerCAmelCase , _PatchedModuleObj(__lowerCAmelCase , attrs=self.attrs ) )
_lowerCamelCase : Tuple = getattr(self.obj , __lowerCAmelCase )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(__lowerCAmelCase , __lowerCAmelCase , _PatchedModuleObj(getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) , attrs=self.attrs ) )
_lowerCamelCase : List[Any] = getattr(__lowerCAmelCase , __lowerCAmelCase )
# finally set the target attribute
setattr(__lowerCAmelCase , __lowerCAmelCase , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
_lowerCamelCase : List[str] = getattr(import_module('''.'''.join(__lowerCAmelCase ) ) , __lowerCAmelCase )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , __lowerCAmelCase ) is attr_value:
_lowerCamelCase : Dict = getattr(self.obj , __lowerCAmelCase )
setattr(self.obj , __lowerCAmelCase , self.new )
elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
_lowerCamelCase : str = globals()['''__builtins__'''][target_attr]
setattr(self.obj , __lowerCAmelCase , self.new )
else:
raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' )
def __exit__( self : Optional[int] , *__lowerCAmelCase : Dict ):
"""simple docstring"""
for attr in list(self.original ):
setattr(self.obj , __lowerCAmelCase , self.original.pop(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
self.__enter__()
self._active_patches.append(self )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
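# A hedged usage sketch of the context-manager class above (in the original `datasets`
# library it is called `patch_submodule`; the import path below is an assumption):
# from datasets.utils.patching import patch_submodule
# with patch_submodule(my_module, "os.path.join", fake_join):
#     ...  # my_module now sees fake_join, even if it did `from os.path import join`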
| 83 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Dict = '''char'''
A : Any = '''bpe'''
A : Dict = '''wp'''
UpperCamelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = ['''image_processor''', '''char_tokenizer''']
A : int = '''ViTImageProcessor'''
A : List[str] = '''MgpstrTokenizer'''
def __init__( self, A=None, A=None, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', A, )
SCREAMING_SNAKE_CASE : str = kwargs.pop('feature_extractor' )
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer
SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('gpt2' )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(A, A )
def __call__( self, A=None, A=None, A=None, **A ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(A, return_tensors=A, **A )
if text is not None:
SCREAMING_SNAKE_CASE : int = self.char_tokenizer(A, return_tensors=A, **A )
if text is None:
return inputs
elif images is None:
return encodings
else:
SCREAMING_SNAKE_CASE : Any = encodings['input_ids']
return inputs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = sequences
SCREAMING_SNAKE_CASE : List[str] = char_preds.size(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self._decode_helper(A, 'char' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._decode_helper(A, 'bpe' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._decode_helper(A, 'wp' )
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : Tuple = []
for i in range(A ):
SCREAMING_SNAKE_CASE : str = [char_scores[i], bpe_scores[i], wp_scores[i]]
SCREAMING_SNAKE_CASE : Dict = [char_strs[i], bpe_strs[i], wp_strs[i]]
SCREAMING_SNAKE_CASE : List[str] = scores.index(max(A ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
SCREAMING_SNAKE_CASE : List[Any] = {}
SCREAMING_SNAKE_CASE : int = final_strs
SCREAMING_SNAKE_CASE : Any = final_scores
SCREAMING_SNAKE_CASE : Dict = char_strs
SCREAMING_SNAKE_CASE : Any = bpe_strs
SCREAMING_SNAKE_CASE : Union[str, Any] = wp_strs
return out
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
SCREAMING_SNAKE_CASE : List[Any] = self.char_decode
SCREAMING_SNAKE_CASE : Optional[int] = 1
SCREAMING_SNAKE_CASE : str = '[s]'
elif format == DecodeType.BPE:
SCREAMING_SNAKE_CASE : str = self.bpe_decode
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : List[str] = '#'
elif format == DecodeType.WORDPIECE:
SCREAMING_SNAKE_CASE : Any = self.wp_decode
SCREAMING_SNAKE_CASE : Tuple = 102
SCREAMING_SNAKE_CASE : List[Any] = '[SEP]'
else:
raise ValueError(F"Format {format} is not supported." )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = [], []
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_logits.size(0 )
SCREAMING_SNAKE_CASE : Any = pred_logits.size(1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = pred_logits.topk(1, dim=-1, largest=A, sorted=A )
SCREAMING_SNAKE_CASE : Optional[int] = preds_index.view(-1, A )[:, 1:]
SCREAMING_SNAKE_CASE : List[Any] = decoder(A )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.softmax(A, dim=2 ).max(dim=2 )
SCREAMING_SNAKE_CASE : Dict = preds_max_prob[:, 1:]
for index in range(A ):
SCREAMING_SNAKE_CASE : Optional[int] = preds_str[index].find(A )
SCREAMING_SNAKE_CASE : List[Any] = preds_str[index][:pred_eos]
SCREAMING_SNAKE_CASE : Dict = preds_index[index].cpu().tolist()
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_index.index(A ) if eos_token in pred_index else -1
SCREAMING_SNAKE_CASE : Optional[int] = preds_max_prob[index][: pred_eos_index + 1]
SCREAMING_SNAKE_CASE : Optional[int] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(A )
conf_scores.append(A )
return dec_strs, conf_scores
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [seq.replace(' ', '' ) for seq in self.char_tokenizer.batch_decode(A )]
return decode_strs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [seq.replace(' ', '' ) for seq in self.wp_tokenizer.batch_decode(A )]
return decode_strs
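# Decoding recap: `batch_decode` decodes the character, BPE and WordPiece heads
# separately, scores each string by the cumulative product of its per-step max softmax
# probabilities up to the end token, and keeps the highest-scoring candidate per sample;
# the per-head strings are also kept in the returned dict for inspection.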
| 28 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class A_ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=99 , snake_case=32 , snake_case=2 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , snake_case=1000 , ):
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_input_mask
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_labels
lowercase = num_choices
lowercase = scope
lowercase = range_bbox
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
lowercase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowercase = bbox[i, j, 3]
lowercase = bbox[i, j, 1]
lowercase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowercase = bbox[i, j, 2]
lowercase = bbox[i, j, 0]
lowercase = t
lowercase = tf.convert_to_tensor(snake_case )
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length] )
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase = None
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase = ids_tensor([self.batch_size] , self.num_choices )
lowercase = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
lowercase = TFLayoutLMModel(config=snake_case )
lowercase = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
lowercase = model(snake_case , snake_case , token_type_ids=snake_case )
lowercase = model(snake_case , snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
lowercase = TFLayoutLMForMaskedLM(config=snake_case )
lowercase = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
lowercase = self.num_labels
lowercase = TFLayoutLMForSequenceClassification(config=snake_case )
lowercase = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
lowercase = self.num_labels
lowercase = TFLayoutLMForTokenClassification(config=snake_case )
lowercase = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
lowercase = TFLayoutLMForQuestionAnswering(config=snake_case )
lowercase = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.prepare_config_and_inputs()
        lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = config_and_inputs
lowercase = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class A_ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
_UpperCamelCase : Tuple = (
{
"""feature-extraction""": TFLayoutLMModel,
"""fill-mask""": TFLayoutLMForMaskedLM,
"""text-classification""": TFLayoutLMForSequenceClassification,
"""token-classification""": TFLayoutLMForTokenClassification,
"""zero-shot""": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase : List[str] = False
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : Optional[int] = 10
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = TFLayoutLMModelTester(self )
lowercase = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = TFLayoutLMModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def UpperCAmelCase_ ( ):
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
lowercase = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231
lowercase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
lowercase = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
lowercase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
lowercase = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
lowercase , lowercase , lowercase , lowercase , lowercase = prepare_layoutlm_batch_inputs()
# forward pass
lowercase = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case )
# test the sequence output on [0, :3, :3]
lowercase = tf.convert_to_tensor(
[[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1E-3 ) )
# test the pooled output on [1, :3]
lowercase = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , snake_case , atol=1E-3 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
# initialize model with randomly initialized sequence classification head
lowercase = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
lowercase , lowercase , lowercase , lowercase , lowercase = prepare_layoutlm_batch_inputs()
# forward pass
lowercase = model(
input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
lowercase = outputs.loss
lowercase = (2,)
self.assertEqual(loss.shape , snake_case )
# test the shape of the logits
lowercase = outputs.logits
lowercase = (2, 2)
self.assertEqual(logits.shape , snake_case )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
# initialize model with randomly initialized token classification head
lowercase = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 )
lowercase , lowercase , lowercase , lowercase , lowercase = prepare_layoutlm_batch_inputs()
# forward pass
lowercase = model(
input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
# test the shape of the logits
lowercase = outputs.logits
lowercase = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , snake_case )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
        # initialize model with randomly initialized question answering head
lowercase = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
lowercase , lowercase , lowercase , lowercase , lowercase = prepare_layoutlm_batch_inputs()
# forward pass
lowercase = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case )
# test the shape of the logits
lowercase = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , snake_case )
self.assertEqual(outputs.end_logits.shape , snake_case )
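

# A hedged, vectorised restatement of the bbox "legality" fix the tester above
# performs element-wise: ensure x0 <= x1 and y0 <= y1 for every box. The
# (batch, seq_len, 4) layout and the function name are assumptions used for
# illustration only.
import numpy as np


def sort_box_corners(bbox):
    # bbox: (..., 4) laid out as (x0, y0, x1, y1)
    xs = np.sort(bbox[..., [0, 2]], axis=-1)
    ys = np.sort(bbox[..., [1, 3]], axis=-1)
    return np.stack([xs[..., 0], ys[..., 0], xs[..., 1], ys[..., 1]], axis=-1)


boxes = np.random.randint(0, 1000, size=(2, 3, 4))
fixed = sort_box_corners(boxes)
assert (fixed[..., 2] >= fixed[..., 0]).all() and (fixed[..., 3] >= fixed[..., 1]).all()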
| 84 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger("transformers.models.speecht5")
def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: Any ):
"""simple docstring"""
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE : Any = checkpoint['input_conv.weight_g']
SCREAMING_SNAKE_CASE : List[Any] = checkpoint['input_conv.weight_v']
SCREAMING_SNAKE_CASE : str = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE : Optional[int] = checkpoint[f"upsamples.{i}.1.weight_g"]
SCREAMING_SNAKE_CASE : Dict = checkpoint[f"upsamples.{i}.1.weight_v"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE : int = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
SCREAMING_SNAKE_CASE : str = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
SCREAMING_SNAKE_CASE : Dict = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
SCREAMING_SNAKE_CASE : Tuple = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
SCREAMING_SNAKE_CASE : Optional[Any] = checkpoint['output_conv.1.weight_g']
SCREAMING_SNAKE_CASE : List[Any] = checkpoint['output_conv.1.weight_v']
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: int ,__UpperCamelCase: Any ,__UpperCamelCase: str=None ,__UpperCamelCase: Tuple=None ,):
"""simple docstring"""
if config_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = SpeechTaHifiGanConfig.from_pretrained(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE : Optional[int] = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaHifiGan(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(__UpperCamelCase )
load_weights(orig_checkpoint['model']['generator'] ,__UpperCamelCase ,__UpperCamelCase )
SCREAMING_SNAKE_CASE : int = np.load(__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE : Tuple = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(__UpperCamelCase ).float()
SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(__UpperCamelCase ).float()
model.save_pretrained(__UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
UpperCamelCase_ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
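

# Example invocation (the script name, paths, and repo id are placeholders,
# not values from the original):
#
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path /path/to/generator.ckpt \
#       --stats_path /path/to/stats.npy \
#       --config_path /path/to/config.json \
#       --pytorch_dump_folder_path ./speecht5_hifigan \
#       --push_to_hub my-org/speecht5-hifigan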
| 28 | 0 |
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
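

# A small sketch of how a pin table like the one above is typically consumed:
# splitting each entry back into (package name, version constraint) pairs so a
# runtime check can verify installed versions. The helper name is an
# assumption; the commented-out `require_version` stands in for a real checker.
import re


def split_pin(pin):
    name, constraint = re.match(r"^([^!=<>~]+)(.*)$", pin).groups()
    return name, constraint or None


for pin in SCREAMING_SNAKE_CASE__.values():
    name, constraint = split_pin(pin)
    # require_version(name, constraint)  # hypothetical runtime check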
| 85 |
'''simple docstring'''
from typing import Any
class _a :
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = data
SCREAMING_SNAKE_CASE : Any = None
def __repr__( self ):
'''simple docstring'''
return F"Node({self.data})"
class _a :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = None
def __iter__( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.head
while node:
yield node.data
SCREAMING_SNAKE_CASE : List[str] = node.next
def __len__( self ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self ):
'''simple docstring'''
return "->".join([str(A ) for item in self] )
def __getitem__( self, A ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self, A, A ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
SCREAMING_SNAKE_CASE : Optional[Any] = self.head
for _ in range(A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = current.next
SCREAMING_SNAKE_CASE : Any = data
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
self.insert_nth(len(self ), A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
self.insert_nth(0, A )
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
SCREAMING_SNAKE_CASE : Union[str, Any] = Node(A )
if self.head is None:
SCREAMING_SNAKE_CASE : Optional[int] = new_node
elif index == 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # link new_node to head
SCREAMING_SNAKE_CASE : Tuple = new_node
else:
SCREAMING_SNAKE_CASE : Optional[int] = self.head
for _ in range(index - 1 ):
SCREAMING_SNAKE_CASE : str = temp.next
SCREAMING_SNAKE_CASE : Union[str, Any] = temp.next
SCREAMING_SNAKE_CASE : List[str] = new_node
def UpperCamelCase_ ( self ): # print every node data
'''simple docstring'''
print(self )
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.delete_nth(0 )
def UpperCamelCase_ ( self ): # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def UpperCamelCase_ ( self, A = 0 ):
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # default first node
if index == 0:
SCREAMING_SNAKE_CASE : List[str] = self.head.next
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.head
for _ in range(index - 1 ):
SCREAMING_SNAKE_CASE : Any = temp.next
SCREAMING_SNAKE_CASE : List[str] = temp.next
SCREAMING_SNAKE_CASE : Optional[int] = temp.next.next
return delete_node.data
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.head is None
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : Any = self.head
while current:
# Store the current node's next node.
SCREAMING_SNAKE_CASE : Optional[int] = current.next
# Make the current node's next point backwards
SCREAMING_SNAKE_CASE : int = prev
# Make the previous node be the current node
SCREAMING_SNAKE_CASE : int = current
# Make the current node the next node (to progress iteration)
SCREAMING_SNAKE_CASE : List[Any] = next_node
# Return prev in order to put the head at the end
SCREAMING_SNAKE_CASE : List[Any] = prev
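        # Worked example (comments only): reversing 1->2->3
        #   start:  prev=None, current=Node(1)
        #   step 1: Node(1).next=None,    prev=Node(1), current=Node(2)
        #   step 2: Node(2).next=Node(1), prev=Node(2), current=Node(3)
        #   step 3: Node(3).next=Node(2), prev=Node(3), current=None
        #   the head is then set to prev, giving 3->2->1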
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = LinkedList()
assert linked_list.is_empty() is True
assert str(__UpperCamelCase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(__UpperCamelCase ) == i
linked_list.insert_nth(__UpperCamelCase ,i + 1 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(0 ,12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(__UpperCamelCase ) == 9
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True
for i in range(0 ,9 ):
SCREAMING_SNAKE_CASE : Any = -i
assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True
linked_list.reverse()
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(-8 ,1 ) )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-1_9_2.5_5_5_5_5,
'Hello, world!',
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
SCREAMING_SNAKE_CASE : Optional[int] = LinkedList()
for i in test_input:
linked_list.insert_tail(__UpperCamelCase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__UpperCamelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
SCREAMING_SNAKE_CASE : str = linked_list.delete_head()
assert result == -9
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
SCREAMING_SNAKE_CASE : Dict = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
SCREAMING_SNAKE_CASE : str = linked_list.delete_nth(10 )
assert result is None
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(__UpperCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(__UpperCamelCase )
assert (
str(__UpperCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(__UpperCamelCase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def lowercase__( ):
"""simple docstring"""
from doctest import testmod
testmod()
SCREAMING_SNAKE_CASE : Dict = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(__UpperCamelCase )
print('\nReading/changing Node data using indexing:' )
print(f"Element at Position 1: {linked_list[1]}" )
SCREAMING_SNAKE_CASE : str = input('Enter New Value: ' ).strip()
print('New list:' )
print(__UpperCamelCase )
print(f"length of linked_list is : {len(__UpperCamelCase )}" )
if __name__ == "__main__":
main()
| 28 | 0 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[str] ):
A_ = 0
@slow
def __A ( self : str ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(UpperCAmelCase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(UpperCAmelCase ) , 0 )
def __A ( self : List[Any] ):
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def __A ( self : Tuple ):
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def __A ( self : Tuple ):
A_ = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
# Check that tokenizer_type ≠ model_type
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase , config=UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def __A ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(UpperCAmelCase , "vocab.txt" ) )
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase , tokenizer_type="bert" , use_fast=UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(UpperCAmelCase , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(UpperCAmelCase , "merges.txt" ) )
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase , tokenizer_type="gpt2" , use_fast=UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@require_tokenizers
def __A ( self : str ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(UpperCAmelCase , "vocab.txt" ) )
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase , tokenizer_type="bert" )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(UpperCAmelCase , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(UpperCAmelCase , "merges.txt" ) )
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase , tokenizer_type="gpt2" )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def __A ( self : List[str] ):
with pytest.raises(UpperCAmelCase ):
AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx" )
@require_tokenizers
def __A ( self : str ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
A_ = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" )
self.assertIsInstance(UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCAmelCase )
else:
self.assertEqual(tokenizer.do_lower_case , UpperCAmelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def __A ( self : List[Any] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
UpperCAmelCase , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ):
A_ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" )
def __A ( self : Tuple ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
A_ = TOKENIZER_MAPPING.values()
A_ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(UpperCAmelCase )
@require_tokenizers
def __A ( self : List[str] ):
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=UpperCAmelCase ) , UpperCAmelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ) , UpperCAmelCase )
@require_tokenizers
def __A ( self : int ):
A_ = AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=UpperCAmelCase )
A_ = "Hello, world. How are you?"
A_ = tokenizer.tokenize(UpperCAmelCase )
self.assertEqual("[UNK]" , tokens[0] )
A_ = AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=UpperCAmelCase )
A_ = tokenizer.tokenize(UpperCAmelCase )
self.assertEqual("[UNK]" , tokens[0] )
@require_tokenizers
def __A ( self : Union[str, Any] ):
A_ = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" )
self.assertEqual(type(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , "[UNK]" )
self.assertEqual(tokenizer.padding_side , "right" )
self.assertEqual(tokenizer.truncation_side , "right" )
def __A ( self : Any ):
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCAmelCase )
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def __A ( self : Any ):
A_ = AutoTokenizer.from_pretrained("ctrl" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def __A ( self : int ):
# Check we can load the tokenizer config of an online model.
A_ = get_tokenizer_config("bert-base-cased" )
A_ = config.pop("_commit_hash" , UpperCAmelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(UpperCAmelCase , {"do_lower_case": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
A_ = get_tokenizer_config(UpperCAmelCase )
self.assertDictEqual(UpperCAmelCase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCAmelCase )
A_ = get_tokenizer_config(UpperCAmelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["tokenizer_class"] , "BertTokenizer" )
def __A ( self : Any ):
try:
AutoConfig.register("custom" , UpperCAmelCase )
AutoTokenizer.register(UpperCAmelCase , slow_tokenizer_class=UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase ):
AutoTokenizer.register(UpperCAmelCase , slow_tokenizer_class=UpperCAmelCase )
A_ = CustomTokenizer.from_pretrained(UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCAmelCase )
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def __A ( self : Optional[int] ):
try:
AutoConfig.register("custom" , UpperCAmelCase )
# Can register in two steps
AutoTokenizer.register(UpperCAmelCase , slow_tokenizer_class=UpperCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(UpperCAmelCase , fast_tokenizer_class=UpperCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
UpperCAmelCase , slow_tokenizer_class=UpperCAmelCase , fast_tokenizer_class=UpperCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase ):
AutoTokenizer.register(UpperCAmelCase , fast_tokenizer_class=UpperCAmelCase )
            # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
A_ = BertTokenizerFast.from_pretrained(UpperCAmelCase )
bert_tokenizer.save_pretrained(UpperCAmelCase )
A_ = CustomTokenizerFast.from_pretrained(UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCAmelCase )
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase , use_fast=UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
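        # Hedged usage sketch (comments only) of the flow exercised above:
        #   AutoConfig.register("custom", CustomConfig)
        #   AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer,
        #                          fast_tokenizer_class=CustomTokenizerFast)
        #   tok = AutoTokenizer.from_pretrained(saved_dir)  # resolves to CustomTokenizerFast
        # where saved_dir is a placeholder for a directory written by save_pretrained.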
def __A ( self : Tuple ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCAmelCase ):
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase ):
A_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCAmelCase )
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCAmelCase )
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase , trust_remote_code=UpperCAmelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
A_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCAmelCase , use_fast=UpperCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCAmelCase )
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase , trust_remote_code=UpperCAmelCase , use_fast=UpperCAmelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
@require_tokenizers
def __A ( self : Union[str, Any] ):
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = False
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = NewTokenizer
_lowerCamelCase : Tuple = False
try:
AutoConfig.register("custom" , UpperCAmelCase )
AutoTokenizer.register(UpperCAmelCase , slow_tokenizer_class=UpperCAmelCase )
AutoTokenizer.register(UpperCAmelCase , fast_tokenizer_class=UpperCAmelCase )
# If remote code is not set, the default is to use local
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
A_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
A_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCAmelCase , use_fast=UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
A_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertTrue(tokenizer.special_attribute_present )
A_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCAmelCase , use_fast=UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __A ( self : str ):
A_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=UpperCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
A_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=UpperCAmelCase , use_fast=UpperCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def __A ( self : Tuple ):
with self.assertRaisesRegex(
UpperCAmelCase , "bert-base is not a local folder and is not a valid model identifier" ):
A_ = AutoTokenizer.from_pretrained("bert-base" )
def __A ( self : List[str] ):
with self.assertRaisesRegex(
UpperCAmelCase , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase , revision="aaaaaa" )
def __A ( self : List[Any] ):
# Make sure we have cached the tokenizer.
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )
| 86 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class _a ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, A, A=7, A=3, A=30, A=400, A=True, A=None, A=True, A=[0.5, 0.5, 0.5], A=[0.5, 0.5, 0.5], A=True, A=1 / 255, A=True, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : Dict = batch_size
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : Tuple = min_resolution
SCREAMING_SNAKE_CASE : int = max_resolution
SCREAMING_SNAKE_CASE : Tuple = do_resize
SCREAMING_SNAKE_CASE : Tuple = size
SCREAMING_SNAKE_CASE : Any = do_normalize
SCREAMING_SNAKE_CASE : Optional[int] = image_mean
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std
SCREAMING_SNAKE_CASE : Optional[int] = do_rescale
SCREAMING_SNAKE_CASE : int = rescale_factor
SCREAMING_SNAKE_CASE : List[str] = do_pad
def UpperCamelCase_ ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase_ ( self, A, A=False ):
'''simple docstring'''
if not batched:
SCREAMING_SNAKE_CASE : List[Any] = image_inputs[0]
if isinstance(A, Image.Image ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = image.size
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE : int = int(self.size['shortest_edge'] * h / w )
SCREAMING_SNAKE_CASE : int = self.size['shortest_edge']
elif w > h:
SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge']
SCREAMING_SNAKE_CASE : Dict = int(self.size['shortest_edge'] * w / h )
else:
SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge']
SCREAMING_SNAKE_CASE : int = self.size['shortest_edge']
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE : Union[str, Any] = max(A, key=lambda A : item[0] )[0]
SCREAMING_SNAKE_CASE : str = max(A, key=lambda A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : List[Any] = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = YolosImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A, 'image_mean' ) )
self.assertTrue(hasattr(A, 'image_std' ) )
self.assertTrue(hasattr(A, 'do_normalize' ) )
self.assertTrue(hasattr(A, 'do_resize' ) )
self.assertTrue(hasattr(A, 'size' ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1_333} )
self.assertEqual(image_processor.do_pad, A )
SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=A )
self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A, Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(A, batched=A )
SCREAMING_SNAKE_CASE : Tuple = image_processing(A, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, numpify=A )
for image in image_inputs:
self.assertIsInstance(A, np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(A, return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(A, batched=A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A )
for image in image_inputs:
self.assertIsInstance(A, torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processing(A, return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(A, batched=A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(do_resize=A, do_normalize=A, do_rescale=A )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A )
for image in image_inputs:
self.assertIsInstance(A, torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
SCREAMING_SNAKE_CASE : List[str] = image_processing_a.pad(A, return_tensors='pt' )
SCREAMING_SNAKE_CASE : Dict = image_processing_a(A, return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r' ) as f:
SCREAMING_SNAKE_CASE : Dict = json.loads(f.read() )
SCREAMING_SNAKE_CASE : Any = {'image_id': 39_769, 'annotations': target}
# encode them
SCREAMING_SNAKE_CASE : Any = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
SCREAMING_SNAKE_CASE : int = image_processing(images=A, annotations=A, return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) )
# verify boxes
SCREAMING_SNAKE_CASE : str = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) )
# verify class_labels
SCREAMING_SNAKE_CASE : int = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) )
# verify orig_size
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) )
# verify size
SCREAMING_SNAKE_CASE : str = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r' ) as f:
SCREAMING_SNAKE_CASE : int = json.loads(f.read() )
SCREAMING_SNAKE_CASE : List[Any] = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
SCREAMING_SNAKE_CASE : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
SCREAMING_SNAKE_CASE : int = YolosImageProcessor(format='coco_panoptic' )
SCREAMING_SNAKE_CASE : str = image_processing(images=A, annotations=A, masks_path=A, return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) )
# verify boxes
SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape, A )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) )
# verify class_labels
SCREAMING_SNAKE_CASE : Any = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) )
# verify masks
SCREAMING_SNAKE_CASE : Optional[int] = 822_873
self.assertEqual(encoding['labels'][0]['masks'].sum().item(), A )
# verify orig_size
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) )
# verify size
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
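

# Minimal sketch of the shortest-edge resize rule encoded in the tester above:
# scale so the shorter side hits `shortest_edge` while keeping the aspect
# ratio. (The real processor additionally caps the longer side at
# `longest_edge`; this sketch deliberately omits that clamp.)
def resized_hw(height, width, shortest_edge=18):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge


assert resized_hw(400, 200) == (36, 18)
assert resized_hw(200, 400) == (18, 36)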
| 28 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Dict = {"""vocab_file""": """sentencepiece.bpe.model"""}
_lowerCamelCase : Any = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
_lowerCamelCase : List[Any] = {
"""moussaKam/mbarthez""": 1024,
"""moussaKam/barthez""": 1024,
"""moussaKam/barthez-orangesum-title""": 1024,
}
_lowerCamelCase : Union[str, Any] = """▁"""
class BarthezTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file: str,
        bos_token: str = "<s>",
        eos_token: str = "</s>",
        sep_token: str = "</s>",
        cls_token: str = "<s>",
        unk_token: str = "<unk>",
        pad_token: str = "<pad>",
        mask_token: str = "<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
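# Minimal usage sketch (not part of the original module; needs network access
# to download the checkpoint named in the vocab map above):
if __name__ == "__main__":
    tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
    ids = tokenizer("Un texte en français.")["input_ids"]
    print(tokenizer.convert_ids_to_tokens(ids))
    print(tokenizer.decode(ids))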
| 87 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.')
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        'is an empty dataset dictionary.')
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
):
    """simple docstring"""
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.')
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        'is an empty dataset dictionary.')
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
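# Minimal sketch of both entry points (the toy datasets are illustrative and
# assume the `datasets` package is installed as a whole):
if __name__ == "__main__":
    from datasets import Dataset

    d1 = Dataset.from_dict({"text": ["a", "b"]})
    d2 = Dataset.from_dict({"text": ["c", "d"]})
    print(concatenate_datasets([d1, d2])["text"])  # ['a', 'b', 'c', 'd']
    print(interleave_datasets([d1, d2])["text"])   # alternates: ['a', 'c', 'b', 'd']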
| 28 | 0 |
"""simple docstring"""
def _snake_case ( __snake_case : float , __snake_case : float , __snake_case : int ):
"""simple docstring"""
if principal <= 0:
raise Exception("""Principal borrowed must be > 0""" )
if rate_per_annum < 0:
raise Exception("""Rate of interest must be >= 0""" )
if years_to_repay <= 0 or not isinstance(__snake_case , __snake_case ):
raise Exception("""Years to repay must be an integer > 0""" )
# Yearly rate is divided by 12 to get monthly rate
_lowerCamelCase : Tuple = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
_lowerCamelCase : str = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
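    # Worked example: 100_000 borrowed at 10% per annum over 2 years gives a
    # monthly rate of 0.10 / 12 over 24 payments, i.e. an EMI of ~4_614.49.
    print(f"{equated_monthly_installments(100_000, 0.10, 2):.2f}")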
| 88 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    '''simple docstring'''
    def create_and_test_config_common_properties(self):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'neck_hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
class MobileViTModelTester:
'''simple docstring'''
def __init__( self, A, A=13, A=32, A=2, A=3, A=640, A=4, A="silu", A=3, A=32, A=0.1, A=0.1, A=0.1, A=0.02, A=True, A=True, A=10, A=None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : str = patch_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : int = last_hidden_size
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = conv_kernel_size
SCREAMING_SNAKE_CASE : Optional[Any] = output_stride
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : int = is_training
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = scope
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = MobileViTModel(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.num_labels
SCREAMING_SNAKE_CASE : Tuple = MobileViTForImageClassification(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(A, labels=A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE : str = MobileViTForSemanticSegmentation(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : str = model(A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
SCREAMING_SNAKE_CASE : int = model(A, labels=A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class MobileViTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MobileViTModel,
            'image-classification': MobileViTForImageClassification,
            'image-segmentation': MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = MobileViTModelTester(self )
SCREAMING_SNAKE_CASE : str = MobileViTConfigTester(self, config_class=A, has_text_modality=A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A )
SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ['pixel_values']
self.assertListEqual(arg_names[:1], A )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
def check_hidden_states_output(A, A, A ):
SCREAMING_SNAKE_CASE : Any = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : List[str] = 5
self.assertEqual(len(A ), A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE : int = 2
for i in range(len(A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(A, A, A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
check_hidden_states_output(A, A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : int = MobileViTModel.from_pretrained(A )
self.assertIsNotNone(A )
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(A )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : Dict = prepare_img()
SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**A )
# verify the logits
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, A )
SCREAMING_SNAKE_CASE : int = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = model.to(A )
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**A )
SCREAMING_SNAKE_CASE : List[str] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, A )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[
[[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
[[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
[[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
], device=A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : List[str] = model.to(A )
SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**A )
SCREAMING_SNAKE_CASE : int = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE : Dict = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, A )
SCREAMING_SNAKE_CASE : Tuple = image_processor.post_process_semantic_segmentation(outputs=A )
SCREAMING_SNAKE_CASE : Any = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, A )
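# End-to-end sketch of the segmentation path the slow tests above verify
# (illustrative; needs network access for the checkpoint and reuses the COCO
# fixture loaded by prepare_img):
if __name__ == "__main__":
    image_processor = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
    model = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
    inputs = image_processor(images=prepare_img(), return_tensors='pt')
    with torch.no_grad():
        outputs = model(**inputs)
    segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
    print(outputs.logits.shape, segmentation[0].shape)  # (1, 21, 32, 32) and (32, 32)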
| 28 | 0 |
SCREAMING_SNAKE_CASE : List[Any] = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
SCREAMING_SNAKE_CASE : Optional[int] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
SCREAMING_SNAKE_CASE : Tuple = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
SCREAMING_SNAKE_CASE : Union[str, Any] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
SCREAMING_SNAKE_CASE : List[Any] = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
SCREAMING_SNAKE_CASE : Union[str, Any] = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
SCREAMING_SNAKE_CASE : List[Any] = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
SCREAMING_SNAKE_CASE : int = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
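# Each list above is a diffusion timestep schedule running from 999 down to 0.
# A small validity check of the kind a test might apply (illustrative helper,
# not part of the original file):
def _is_valid_timestep_schedule(schedule, num_train_timesteps=1_000):
    # strictly decreasing, within [0, num_train_timesteps), and ending at 0
    strictly_decreasing = all(a > b for a, b in zip(schedule, schedule[1:]))
    return strictly_decreasing and 0 <= schedule[0] < num_train_timesteps and schedule[-1] == 0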
| 89 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token='[UNK]',
        sep_token='[SEP]',
        pad_token='[PAD]',
        cls_token='[CLS]',
        mask_token='[MASK]',
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
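# Usage sketch (needs network access; any checkpoint listed in the maps above
# works the same way):
if __name__ == "__main__":
    tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
    encoding = tokenizer("Hello world")
    print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))  # ['[CLS]', 'hello', 'world', '[SEP]']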
| 28 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class ErnieMConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__( self , vocab_size = 250_002 , hidden_size = 768 , num_hidden_layers = 12 , num_attention_heads = 12 , intermediate_size = 3_072 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 514 , initializer_range = 0.02 , pad_token_id = 1 , layer_norm_eps = 1e-05 , classifier_dropout=None , is_decoder=False , act_dropout=0.0 , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout | 90 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
UpperCamelCase_ = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = mock.Mock()
SCREAMING_SNAKE_CASE : List[Any] = 500
SCREAMING_SNAKE_CASE : Optional[Any] = {}
SCREAMING_SNAKE_CASE : Any = HTTPError
SCREAMING_SNAKE_CASE : Any = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=A ) as mock_head:
SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ViTImageProcessor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaises(A ):
# config is in subfolder, the following should not work without specifying the subfolder
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' )
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(
'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor' )
self.assertIsNotNone(A )
@is_staging_test
class ImageProcessorPushToHubTester( unittest.TestCase ):
'''simple docstring'''
@classmethod
    def setUpClass( cls ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = TOKEN
HfFolder.save_token(A )
@classmethod
    def tearDownClass( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-image-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-image-processor' )
except HTTPError:
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub('test-image-processor', use_auth_token=self._token )
SCREAMING_SNAKE_CASE : int = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A, repo_id='test-image-processor', push_to_hub=A, use_auth_token=self._token )
SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token )
SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A, repo_id='valid_org/test-image-processor-org', push_to_hub=A, use_auth_token=self._token )
SCREAMING_SNAKE_CASE : Dict = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE : Tuple = CustomImageProcessor.from_pretrained(A )
image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'}, )
SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(
F"{USER}/test-dynamic-image-processor", trust_remote_code=A )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor' )
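# The push/pull round trip these tests exercise, reduced to its minimal form
# (the repo name is illustrative and a valid Hub token must be configured):
if __name__ == "__main__":
    image_processor = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
    image_processor.push_to_hub('my-user/my-test-image-processor')
    reloaded = ViTImageProcessor.from_pretrained('my-user/my-test-image-processor')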
| 28 | 0 |
"""simple docstring"""
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be non-negative")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
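    # Worked example: 25 is 0b11001 and 32 is 0b100000; no bit positions
    # overlap, so every output bit is zero.
    print(binary_and(25, 32))  # 0b000000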
doctest.testmod() | 91 |
'''simple docstring'''
class Node:
    '''simple docstring'''
    def __init__(self, val):
        '''simple docstring'''
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        '''simple docstring'''
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """simple docstring"""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """simple docstring"""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
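    # Expected output: [1, 2, 3, 9, 10, 13, 14]. Average cost is O(n log n);
    # already-sorted input degrades the unbalanced BST to O(n^2).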
| 28 | 0 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester :
def __init__( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str]=13 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=99 , UpperCAmelCase__ : Any=32 , UpperCAmelCase__ : Optional[int]=5 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Dict=37 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Optional[Any]=512 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : List[str]=None , ):
'''simple docstring'''
lowercase : List[str] =parent
lowercase : Dict =batch_size
lowercase : List[str] =seq_length
lowercase : List[str] =is_training
lowercase : Union[str, Any] =use_input_mask
lowercase : str =use_token_type_ids
lowercase : List[Any] =use_labels
lowercase : List[Any] =vocab_size
lowercase : Tuple =hidden_size
lowercase : Union[str, Any] =num_hidden_layers
lowercase : Dict =num_attention_heads
lowercase : List[str] =intermediate_size
lowercase : str =hidden_act
lowercase : Any =hidden_dropout_prob
lowercase : str =attention_probs_dropout_prob
lowercase : str =max_position_embeddings
lowercase : Dict =type_vocab_size
lowercase : List[str] =type_sequence_label_size
lowercase : List[Any] =initializer_range
lowercase : Optional[Any] =num_labels
lowercase : Any =num_choices
lowercase : Optional[int] =scope
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Optional[Any] =None
if self.use_input_mask:
lowercase : Tuple =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Union[str, Any] =None
if self.use_token_type_ids:
lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : str =None
lowercase : int =None
lowercase : Any =None
if self.use_labels:
lowercase : Any =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : List[str] =ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[str] =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , use_stable_embedding=UpperCAmelCase__ , )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
lowercase : Union[str, Any] =OpenLlamaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : str =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
lowercase : Tuple =model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , ):
'''simple docstring'''
lowercase : Optional[Any] =True
lowercase : Optional[Any] =OpenLlamaModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : List[str] =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
lowercase : int =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )
lowercase : Optional[Any] =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , ):
'''simple docstring'''
lowercase : List[str] =OpenLlamaForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : Union[str, Any] =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , ):
'''simple docstring'''
lowercase : int =True
lowercase : List[str] =True
lowercase : Tuple =OpenLlamaForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# first forward pass
lowercase : Optional[int] =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , )
lowercase : Union[str, Any] =outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase : Any =ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase : Tuple =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase : Any =torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase : int =torch.cat([input_mask, next_mask] , dim=-1 )
lowercase : Dict =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0]
lowercase : Optional[int] =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0]
# select random slice
lowercase : List[Any] =ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase : Any =output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase : Tuple =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Tuple =self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : str =config_and_inputs
lowercase : List[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class OpenLlamaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Any =OpenLlamaModelTester(self )
lowercase : Dict =ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase : Dict =type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase , lowercase : Dict =self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Optional[int] =3
lowercase : List[Any] =input_dict['''input_ids''']
lowercase : Any =input_ids.ne(1 ).to(UpperCAmelCase__ )
lowercase : Optional[Any] =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase : Optional[Any] =OpenLlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : List[str] =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase , lowercase : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Dict =3
lowercase : List[str] ='''single_label_classification'''
lowercase : Tuple =input_dict['''input_ids''']
lowercase : Dict =input_ids.ne(1 ).to(UpperCAmelCase__ )
lowercase : List[str] =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase : Dict =OpenLlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : str =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase , lowercase : str =self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Optional[Any] =3
lowercase : Optional[int] ='''multi_label_classification'''
lowercase : Dict =input_dict['''input_ids''']
lowercase : Optional[int] =input_ids.ne(1 ).to(UpperCAmelCase__ )
lowercase : Dict =ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase : str =OpenLlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : List[str] =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
lowercase , lowercase : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
lowercase : List[str] =ids_tensor([1, 10] , config.vocab_size )
lowercase : Union[str, Any] =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase : Dict =OpenLlamaModel(UpperCAmelCase__ )
original_model.to(UpperCAmelCase__ )
original_model.eval()
lowercase : Optional[Any] =original_model(UpperCAmelCase__ ).last_hidden_state
lowercase : Tuple =original_model(UpperCAmelCase__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase : Optional[int] ={'''type''': scaling_type, '''factor''': 10.0}
lowercase : Tuple =OpenLlamaModel(UpperCAmelCase__ )
scaled_model.to(UpperCAmelCase__ )
scaled_model.eval()
lowercase : Optional[Any] =scaled_model(UpperCAmelCase__ ).last_hidden_state
lowercase : Optional[int] =scaled_model(UpperCAmelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
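# The parameterized scaling cases above reduce to a single config field; a
# minimal sketch (treating `rope_scaling` as the field the test assumes):
if __name__ == "__main__":
    config = OpenLlamaConfig(rope_scaling={'type': 'linear', 'factor': 10.0})
    print(config.rope_scaling)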
| 92 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    """simple docstring"""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}")
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")
    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
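# Typical call pattern (illustrative argument names): pop a deprecated keyword
# while warning the caller, e.g.
#
#   scale = deprecate("scale", "1.0.0", "Use `size` instead.", take_from=kwargs)
#
# With `take_from=kwargs` the value is popped and returned; any leftover
# unexpected kwargs then raise a TypeError as implemented above.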
| 28 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["torch", "scipy"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch', 'scipy'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch', 'scipy'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch', 'scipy'] )
| 93 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
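# The `_LazyModule` indirection above defers heavy framework imports until an
# attribute is first touched. A minimal PEP 562 sketch of the same mechanism
# (illustrative; not the actual `_LazyModule` implementation):
import importlib

_lazy_attr_to_module = {"dumps": "json", "loads": "json"}  # example mapping


def __getattr__(name):  # module-level hook, called only for unknown attributes
    if name in _lazy_attr_to_module:
        return getattr(importlib.import_module(_lazy_attr_to_module[name]), name)
    raise AttributeError(name)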
| 28 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = '''decision_transformer'''
UpperCamelCase_ = ['''past_key_values''']
UpperCamelCase_ = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Tuple , UpperCAmelCase : Optional[Any]=17 , UpperCAmelCase : Optional[Any]=4 , UpperCAmelCase : Tuple=128 , UpperCAmelCase : Optional[int]=4096 , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Dict=1 , UpperCAmelCase : Optional[Any]=1024 , UpperCAmelCase : Optional[int]=3 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : List[str]=None , UpperCAmelCase : Union[str, Any]="relu" , UpperCAmelCase : int=0.1 , UpperCAmelCase : Any=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Tuple=1e-5 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=5_0256 , UpperCAmelCase : List[str]=5_0256 , UpperCAmelCase : Tuple=False , UpperCAmelCase : str=False , **UpperCAmelCase : Union[str, Any] , ) -> int:
'''simple docstring'''
lowercase : int =state_dim
lowercase : Union[str, Any] =act_dim
lowercase : Optional[int] =hidden_size
lowercase : str =max_ep_len
lowercase : Any =action_tanh
lowercase : List[str] =vocab_size
lowercase : Dict =n_positions
lowercase : Optional[int] =n_layer
lowercase : Tuple =n_head
lowercase : Dict =n_inner
lowercase : str =activation_function
lowercase : List[Any] =resid_pdrop
lowercase : List[Any] =embd_pdrop
lowercase : Dict =attn_pdrop
lowercase : Optional[int] =layer_norm_epsilon
lowercase : List[Any] =initializer_range
lowercase : List[str] =scale_attn_weights
lowercase : List[Any] =use_cache
lowercase : Any =scale_attn_by_inverse_layer_idx
lowercase : str =reorder_and_upcast_attn
lowercase : List[str] =bos_token_id
lowercase : Optional[int] =eos_token_id
super().__init__(bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
| 94 |
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        raise TypeError('Input value must be an \'int\' type' )
    position = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
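# Cross-check against the builtin: for non-negative integers the position
# computed above equals `int.bit_length()`.
if __name__ == "__main__":
    for n in (0, 1, 2, 255, 1024):
        assert get_highest_set_bit_position(n) == n.bit_length()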
| 28 | 0 |
"""simple docstring"""
from __future__ import annotations
solution = []
def is_safe( board ,row ,column ):
    for i in range(len(board ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(board ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row ,-1 ,-1 ) ,range(column ,-1 ,-1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row ,-1 ,-1 ) ,range(column ,len(board ) ) ):
        if board[i][j] == 1:
            return False
    return True
def solve( board ,row ):
    if row >= len(board ):
        solution.append(board )
        printboard(board )
        print()
        return True
    for i in range(len(board ) ):
        if is_safe(board ,row ,i ):
            board[row][i] = 1
            solve(board ,row + 1 )
            board[row][i] = 0
    return False
def printboard( board ):
    for i in range(len(board ) ):
        for j in range(len(board ) ):
            if board[i][j] == 1:
                print("Q" ,end=" " )
            else:
                print("." ,end=" " )
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
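# Sanity check for the backtracking above: the classic solution counts for
# n = 1..6 are 1, 0, 0, 2, 10, 4 (and 92 for the n = 8 run above). This
# self-contained variant counts placements with column/diagonal sets
# instead of printing boards.
def count_queens(size, row=0, cols=frozenset(), diag_a=frozenset(), diag_b=frozenset()):
    if row == size:
        return 1
    total = 0
    for col in range(size):
        if col in cols or (row - col) in diag_a or (row + col) in diag_b:
            continue
        total += count_queens(size, row + 1, cols | {col}, diag_a | {row - col}, diag_b | {row + col})
    return total


assert [count_queens(i) for i in range(1, 7)] == [1, 0, 0, 2, 10, 4]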
| 95 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self, A=None, A=None, A=None, **A ):
'''simple docstring'''
if tokenize_kwargs is None:
SCREAMING_SNAKE_CASE : Optional[int] = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
SCREAMING_SNAKE_CASE : Tuple = truncation
SCREAMING_SNAKE_CASE : int = tokenize_kwargs
SCREAMING_SNAKE_CASE : Optional[Any] = {}
if return_tensors is not None:
SCREAMING_SNAKE_CASE : Optional[int] = return_tensors
return preprocess_params, {}, postprocess_params
def UpperCamelCase_ ( self, A, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.framework
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(A, return_tensors=A, **A )
return model_inputs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model(**A )
return model_outputs
def UpperCamelCase_ ( self, A, A=False ):
'''simple docstring'''
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self, *A, **A ):
'''simple docstring'''
return super().__call__(*A, **A )
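# Usage sketch for a feature-extraction pipeline like the one above
# (requires `transformers` plus a backend; the checkpoint name is only an
# example and the first call downloads weights):
from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("hello world")           # [batch][tokens][hidden_size]
print(len(features[0]), len(features[0][0]))  # token count, hidden size (768 here)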
| 28 | 0 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __A ( nn.Module ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 0.0
UpperCAmelCase__ = 1
UpperCAmelCase__ = 1
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = jnp.floataa
def lowerCamelCase__ ( self : str ) -> int:
__magic_name__: str = []
__magic_name__: Any = []
for i in range(self.num_layers ):
__magic_name__: str = self.in_channels if i == 0 else self.out_channels
__magic_name__: Optional[int] = FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__magic_name__: List[str] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__magic_name__: Any = resnets
__magic_name__: str = attentions
if self.add_downsample:
__magic_name__: List[str] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Any , __snake_case : List[Any] , __snake_case : Any , __snake_case : Dict , __snake_case : List[str]=True ) -> str:
__magic_name__: str = ()
for resnet, attn in zip(self.resnets , self.attentions ):
__magic_name__: Optional[Any] = resnet(__snake_case , __snake_case , deterministic=__snake_case )
__magic_name__: Union[str, Any] = attn(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__magic_name__: Any = self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __A ( nn.Module ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 0.0
UpperCAmelCase__ = 1
UpperCAmelCase__ = True
UpperCAmelCase__ = jnp.floataa
def lowerCamelCase__ ( self : str ) -> Optional[int]:
__magic_name__: Optional[Any] = []
for i in range(self.num_layers ):
__magic_name__: str = self.in_channels if i == 0 else self.out_channels
__magic_name__: Tuple = FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__magic_name__: Union[str, Any] = resnets
if self.add_downsample:
__magic_name__: str = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : str , __snake_case : int , __snake_case : List[str] , __snake_case : List[Any]=True ) -> Dict:
__magic_name__: Optional[Any] = ()
for resnet in self.resnets:
__magic_name__: Optional[int] = resnet(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__magic_name__: List[str] = self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __A ( nn.Module ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 0.0
UpperCAmelCase__ = 1
UpperCAmelCase__ = 1
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = jnp.floataa
def lowerCamelCase__ ( self : int ) -> int:
__magic_name__: Optional[int] = []
__magic_name__: Any = []
for i in range(self.num_layers ):
__magic_name__: int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__magic_name__: Any = self.prev_output_channel if i == 0 else self.out_channels
__magic_name__: Dict = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__magic_name__: int = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__magic_name__: List[str] = resnets
__magic_name__: List[str] = attentions
if self.add_upsample:
__magic_name__: str = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Tuple , __snake_case : int , __snake_case : List[Any] , __snake_case : Dict , __snake_case : List[str] , __snake_case : Dict=True ) -> List[Any]:
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__magic_name__: Any = res_hidden_states_tuple[-1]
__magic_name__: Optional[int] = res_hidden_states_tuple[:-1]
__magic_name__: Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__magic_name__: Optional[Any] = resnet(__snake_case , __snake_case , deterministic=__snake_case )
__magic_name__: str = attn(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__magic_name__: Optional[Any] = self.upsamplers_a(__snake_case )
return hidden_states
class __A ( nn.Module ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 0.0
UpperCAmelCase__ = 1
UpperCAmelCase__ = True
UpperCAmelCase__ = jnp.floataa
def lowerCamelCase__ ( self : int ) -> Union[str, Any]:
__magic_name__: Any = []
for i in range(self.num_layers ):
__magic_name__: List[str] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__magic_name__: List[Any] = self.prev_output_channel if i == 0 else self.out_channels
__magic_name__: List[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__magic_name__: Any = resnets
if self.add_upsample:
__magic_name__: List[str] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : List[str] , __snake_case : Any , __snake_case : Tuple , __snake_case : str , __snake_case : Optional[int]=True ) -> int:
for resnet in self.resnets:
# pop res hidden states
__magic_name__: List[Any] = res_hidden_states_tuple[-1]
__magic_name__: Dict = res_hidden_states_tuple[:-1]
__magic_name__: Dict = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__magic_name__: Optional[int] = resnet(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__magic_name__: Optional[Any] = self.upsamplers_a(__snake_case )
return hidden_states
class __A ( nn.Module ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = 0.0
UpperCAmelCase__ = 1
UpperCAmelCase__ = 1
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = jnp.floataa
def lowerCamelCase__ ( self : Dict ) -> Dict:
# there is always at least one resnet
__magic_name__: Tuple = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__magic_name__: str = []
for _ in range(self.num_layers ):
__magic_name__: List[Any] = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__magic_name__: Tuple = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__magic_name__: Optional[Any] = resnets
__magic_name__: int = attentions
def __call__( self : str , __snake_case : Tuple , __snake_case : Tuple , __snake_case : List[Any] , __snake_case : Optional[Any]=True ) -> int:
__magic_name__: Optional[Any] = self.resnets[0](__snake_case , __snake_case )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__magic_name__: Optional[Any] = attn(__snake_case , __snake_case , deterministic=__snake_case )
__magic_name__: str = resnet(__snake_case , __snake_case , deterministic=__snake_case )
return hidden_states
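# The up blocks above pop encoder activations from `res_hidden_states_tuple`
# and concatenate them with the decoder stream along the channel axis
# (channels-last in Flax). A shape-only numpy sketch of that bookkeeping:
import numpy as np

hidden = np.zeros((1, 8, 8, 64))                          # decoder stream
res_hidden_states_tuple = (np.zeros((1, 8, 8, 32)), np.zeros((1, 8, 8, 64)))
res = res_hidden_states_tuple[-1]                         # pop the newest skip
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden = np.concatenate((hidden, res), axis=-1)           # -> (1, 8, 8, 128)
assert hidden.shape == (1, 8, 8, 128)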
| 96 |
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    '''simple docstring'''
    def __init__( self, data ):
        '''simple docstring'''
        self.data = data
        self.left = None
        self.right = None
def build_tree() -> TreeNode:
    """simple docstring"""
    print('\n********Press N to stop entering at any point of time********\n' )
    check = input('Enter the value of the root node: ' ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise RuntimeError('Input stream exhausted before the tree was fully built' )
def pre_order(node: TreeNode ):
    """simple docstring"""
    if not isinstance(node ,TreeNode ) or not node:
        return
    print(node.data ,end=',' )
    pre_order(node.left )
    pre_order(node.right )
def in_order(node: TreeNode ):
    """simple docstring"""
    if not isinstance(node ,TreeNode ) or not node:
        return
    in_order(node.left )
    print(node.data ,end=',' )
    in_order(node.right )
def post_order(node: TreeNode ):
    """simple docstring"""
    if not isinstance(node ,TreeNode ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data ,end=',' )
def level_order(node: TreeNode ):
    """simple docstring"""
    if not isinstance(node ,TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data ,end=',' )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def level_order_actual(node: TreeNode ):
    """simple docstring"""
    if not isinstance(node ,TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data ,end=',' )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def pre_order_iter(node: TreeNode ):
    """simple docstring"""
    if not isinstance(node ,TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n: # start from root node, find its left child
            print(n.data ,end=',' )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode ):
    """simple docstring"""
    if not isinstance(node ,TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data ,end=',' )
        n = n.right
def post_order_iter(node: TreeNode ):
    """simple docstring"""
    if not isinstance(node ,TreeNode ) or not node:
        return
    stacka, stackb = [], []
    n = node
    stacka.append(n )
    while stacka: # to find the reversed order of post order, store it in stack2
        n = stacka.pop()
        if n.left:
            stacka.append(n.left )
        if n.right:
            stacka.append(n.right )
        stackb.append(n )
    while stackb: # pop up from stack2 will be the post order
        print(stackb.pop().data ,end=',' )
def prompt(s: str = "" ,width=50 ,char="*" ):
    """simple docstring"""
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s ) - 2 ,2 )
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
    node = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
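# A non-interactive way to exercise the same traversals: wire TreeNode
# objects directly instead of reading values from stdin (quick sketch).
if __name__ == "__main__":
    root = TreeNode(1 )
    root.left, root.right = TreeNode(2 ), TreeNode(3 )
    root.left.left, root.left.right = TreeNode(4 ), TreeNode(5 )
    print(prompt('In Order On A Hard-Coded Tree' ) )
    in_order(root )  # prints 4,2,5,1,3,
    print()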
| 28 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__a = logging.get_logger(__name__)
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :List[str] = ['pixel_values']
def __init__( self : int , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = PIL.Image.BICUBIC , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE_ : int , ) -> None:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = size if size is not None else {'''height''': 2_5_6, '''width''': 2_5_6}
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
lowercase_ = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' )
lowercase_ = do_resize
lowercase_ = size
lowercase_ = resample
lowercase_ = do_center_crop
lowercase_ = crop_size
lowercase_ = do_rescale
lowercase_ = rescale_factor
lowercase_ = do_normalize
lowercase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : PILImageResampling = PIL.Image.BICUBIC , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : List[Any] , ) -> np.ndarray:
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return resize(
SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> np.ndarray:
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[int, float] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> str:
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> np.ndarray:
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : ImageInput , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : float = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> PIL.Image.Image:
lowercase_ = do_resize if do_resize is not None else self.do_resize
lowercase_ = resample if resample is not None else self.resample
lowercase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ = image_mean if image_mean is not None else self.image_mean
lowercase_ = image_std if image_std is not None else self.image_std
lowercase_ = size if size is not None else self.size
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
lowercase_ = crop_size if crop_size is not None else self.crop_size
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' )
lowercase_ = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
lowercase_ = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
lowercase_ = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
lowercase_ = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
lowercase_ = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
lowercase_ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
lowercase_ = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
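# The preprocessing above reduces to resize -> center-crop -> rescale ->
# normalize. A numpy sketch of the last two steps plus the channel flip on a
# fake HWC image (the mean/std values here are just placeholders):
import numpy as np

image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)
image = image * (1 / 255)                        # rescale to [0, 1]
mean, std = np.array([0.5, 0.5, 0.5]), np.array([0.5, 0.5, 0.5])
image = (image - mean) / std                     # normalize per channel
image = image.transpose(2, 0, 1)                 # HWC -> CHW (ChannelDimension.FIRST)
assert image.shape == (3, 224, 224)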
| 97 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _a :
'''simple docstring'''
def __init__( self, A = "cpu", A = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = device
SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizerFast.from_pretrained(A )
SCREAMING_SNAKE_CASE : int = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
SCREAMING_SNAKE_CASE : str = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
SCREAMING_SNAKE_CASE : Dict = torchvision.transforms.Normalize(self.image_mean, self.image_std )
SCREAMING_SNAKE_CASE : List[str] = torchvision.transforms.Resize(224 )
SCREAMING_SNAKE_CASE : List[Any] = torchvision.transforms.CenterCrop(224 )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.resize(A )
SCREAMING_SNAKE_CASE : Any = self.center_crop(A )
SCREAMING_SNAKE_CASE : str = self.normalize(A )
return images
def __call__( self, A=None, A=None, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.tokenizer(text=A, **A )
SCREAMING_SNAKE_CASE : Tuple = self.preprocess_img(A )
SCREAMING_SNAKE_CASE : List[str] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _a ( nn.Module ):
'''simple docstring'''
def __init__( self, A=10, A=0.01, A=None, A=None, A=None, A=None, A=None, A=None, A=False, A=True, A="image", A=True, A=False, A=False, A=False, ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : List[Any] = device if device else get_device()
if vqgan:
SCREAMING_SNAKE_CASE : Optional[Any] = vqgan
else:
SCREAMING_SNAKE_CASE : Tuple = load_vqgan(self.device, conf_path=A, ckpt_path=A )
self.vqgan.eval()
if clip:
SCREAMING_SNAKE_CASE : List[str] = clip
else:
SCREAMING_SNAKE_CASE : Any = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
SCREAMING_SNAKE_CASE : Optional[int] = ProcessorGradientFlow(device=self.device )
SCREAMING_SNAKE_CASE : Optional[int] = iterations
SCREAMING_SNAKE_CASE : Tuple = lr
SCREAMING_SNAKE_CASE : Tuple = log
SCREAMING_SNAKE_CASE : str = make_grid
SCREAMING_SNAKE_CASE : Dict = return_val
SCREAMING_SNAKE_CASE : Union[str, Any] = quantize
SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self, A=None, A=None, A=5, A=True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = []
if output_path is None:
SCREAMING_SNAKE_CASE : int = './animation.gif'
if input_path is None:
SCREAMING_SNAKE_CASE : Optional[int] = self.save_path
SCREAMING_SNAKE_CASE : Optional[Any] = sorted(glob(input_path + '/*' ) )
if not len(A ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(A ) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
SCREAMING_SNAKE_CASE : Optional[Any] = total_duration / len(A )
SCREAMING_SNAKE_CASE : int = [frame_duration] * len(A )
if extend_frames:
SCREAMING_SNAKE_CASE : List[str] = 1.5
SCREAMING_SNAKE_CASE : int = 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(A ) )
imageio.mimsave(A, A, duration=A )
print(F"gif saved to {output_path}" )
def UpperCamelCase_ ( self, A=None, A=None ):
'''simple docstring'''
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
SCREAMING_SNAKE_CASE : str = preprocess(Image.open(A ), target_image_size=256 ).to(self.device )
SCREAMING_SNAKE_CASE : Any = preprocess_vqgan(A )
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : Tuple = self.vqgan.encode(A )
return z
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.latent.detach().requires_grad_()
SCREAMING_SNAKE_CASE : Union[str, Any] = base_latent + transform_vector
if self.quantize:
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.quantize(A )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = trans_latent
return self.vqgan.decode(A )
def UpperCamelCase_ ( self, A, A, A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.clip_preprocessor(text=A, images=A, return_tensors='pt', padding=A )
SCREAMING_SNAKE_CASE : str = self.clip(**A )
SCREAMING_SNAKE_CASE : Any = clip_outputs.logits_per_image
if weights is not None:
SCREAMING_SNAKE_CASE : List[Any] = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_clip_similarity(pos_prompts['prompts'], A, weights=(1 / pos_prompts['weights']) )
if neg_prompts:
SCREAMING_SNAKE_CASE : List[Any] = self._get_clip_similarity(neg_prompts['prompts'], A, weights=neg_prompts['weights'] )
else:
SCREAMING_SNAKE_CASE : str = torch.tensor([1], device=self.device )
SCREAMING_SNAKE_CASE : List[Any] = -torch.log(A ) + torch.log(A )
return loss
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = torch.randn_like(self.latent, requires_grad=A, device=self.device )
SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
SCREAMING_SNAKE_CASE : Union[str, Any] = self._add_vector(A )
SCREAMING_SNAKE_CASE : Dict = loop_post_process(A )
SCREAMING_SNAKE_CASE : List[str] = self._get_CLIP_loss(A, A, A )
print('CLIP loss', A )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=A )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
wandb.init(reinit=A, project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
SCREAMING_SNAKE_CASE : Tuple = Image.open(A )
SCREAMING_SNAKE_CASE : int = image.resize((256, 256) )
            wandb.log({'Original Image': wandb.Image(A )} )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not prompts:
return []
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Dict = []
if isinstance(A, A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(A, (tuple, list) ):
SCREAMING_SNAKE_CASE : List[str] = prompt[0]
SCREAMING_SNAKE_CASE : Any = float(prompt[1] )
elif ":" in prompt:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = prompt.split(':' )
SCREAMING_SNAKE_CASE : Any = float(A )
else:
SCREAMING_SNAKE_CASE : Dict = prompt
SCREAMING_SNAKE_CASE : List[Any] = 1.0
processed_prompts.append(A )
weights.append(A )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A, device=self.device ),
}
def UpperCamelCase_ ( self, A, A=None, A=None, A=True, A=False, A=True, A=True, A=None, ):
'''simple docstring'''
if image_path:
SCREAMING_SNAKE_CASE : int = self._get_latent(A )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(A, A, A )
assert pos_prompts, "You must provide at least one positive prompt."
SCREAMING_SNAKE_CASE : Dict = self.process_prompts(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.process_prompts(A )
if save_final and save_path is None:
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(A ):
os.makedirs(A )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = save_path + '_' + get_timestamp()
os.makedirs(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = save_path
SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(A ) )
SCREAMING_SNAKE_CASE : int = loop_post_process(A )
for iter, transformed_img in enumerate(self._optimize_CLIP(A, A, A ) ):
if show_intermediate:
show_pil(A )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({'Image': wandb.Image(A )} )
if show_final:
show_pil(A )
if save_final:
transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png" ) )
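# The CLIP guidance above amounts to pushing image/prompt similarity up for
# positive prompts and down for negative ones, i.e. a loss of the form
# -log(sim_pos) + log(sim_neg). A tensor-level sketch with stand-in logits:
import torch

sim_pos = torch.tensor([8.0])  # stand-in for summed logits_per_image (positive)
sim_neg = torch.tensor([2.0])  # stand-in for the negative prompts
loss = -torch.log(sim_pos) + torch.log(sim_neg)
assert loss.item() < 0         # positive similarity dominates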
| 28 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ : List[str] = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Tuple = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
lowercase__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Dict = nn.ModuleList(A )
def UpperCamelCase_ ( self, A, A, A, A, A, A = None, A = None, A = None, A = None, A = False, A = True, ):
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(A, A, self.nets ) ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = controlnet(
A, A, A, A, A, A, A, A, A, A, A, )
# merge samples
if i == 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = down_samples, mid_sample
else:
SCREAMING_SNAKE_CASE : str = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(A, A )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def UpperCamelCase_ ( self, A, A = True, A = None, A = False, A = None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Optional[int] = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
A, is_main_process=A, save_function=A, safe_serialization=A, variant=A, )
idx += 1
SCREAMING_SNAKE_CASE : List[Any] = model_path_to_save + F"_{idx}"
@classmethod
def UpperCamelCase_ ( cls, A, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : List[Any] = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
SCREAMING_SNAKE_CASE : Optional[Any] = pretrained_model_path
while os.path.isdir(A ):
SCREAMING_SNAKE_CASE : Optional[int] = ControlNetModel.from_pretrained(A, **A )
controlnets.append(A )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + F"_{idx}"
logger.info(F"{len(A )} controlnets loaded from {pretrained_model_path}." )
if len(A ) == 0:
raise ValueError(
F"No ControlNets found under {os.path.dirname(A )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(A )
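# Merging residuals from several ControlNets, as in the forward pass above,
# is an elementwise sum across nets. A torch sketch with dummy tensors:
import torch

down_a = [torch.ones(1, 4, 8, 8) for _ in range(3)]  # residuals from net 1
down_b = [torch.ones(1, 4, 8, 8) for _ in range(3)]  # residuals from net 2
merged = [a + b for a, b in zip(down_a, down_b)]
assert all(t.eq(2).all() for t in merged)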
| 28 | 0 |
from __future__ import annotations
def simple_interest(principal , daily_interest_rate , days_between_payments ):
if days_between_payments <= 0:
raise ValueError("""days_between_payments must be > 0""" )
if daily_interest_rate < 0:
raise ValueError("""daily_interest_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * daily_interest_rate * days_between_payments
def compound_interest(principal , nominal_annual_interest_rate_percentage , number_of_compounding_periods , ):
if number_of_compounding_periods <= 0:
raise ValueError("""number_of_compounding_periods must be > 0""" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("""nominal_annual_interest_rate_percentage must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def apr_interest(principal , nominal_annual_percentage_rate , number_of_years , ):
if number_of_years <= 0:
raise ValueError("""number_of_years must be > 0""" )
if nominal_annual_percentage_rate < 0:
raise ValueError("""nominal_annual_percentage_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
    return compound_interest(
        principal , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
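# Worked example of the helpers above: $1,000 at a 5% nominal annual rate.
if __name__ == "__main__":
    principal, rate = 1_000.0, 0.05
    print(simple_interest(principal, rate / 365, 365))    # ~50.0 over one year
    print(compound_interest(principal, rate / 365, 365))  # ~51.27 (daily compounding)
    print(apr_interest(principal, rate, 1))               # same as the line above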
| 99 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : str = ['''audio_values''', '''audio_mask''']
def __init__( self, A=2_048, A=1, A=[16, 16], A=128, A=44_100, A=86, A=2_048, A=0.0, **A, ):
'''simple docstring'''
super().__init__(
feature_size=A, sampling_rate=A, padding_value=A, **A, )
SCREAMING_SNAKE_CASE : str = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = feature_size // self.patch_size[1]
SCREAMING_SNAKE_CASE : Dict = n_fft
SCREAMING_SNAKE_CASE : Tuple = sampling_rate // hop_length_to_sampling_rate
SCREAMING_SNAKE_CASE : str = sampling_rate
SCREAMING_SNAKE_CASE : int = padding_value
SCREAMING_SNAKE_CASE : Any = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2, num_mel_filters=A, min_frequency=0.0, max_frequency=2_20_50.0, sampling_rate=A, norm='slaney', mel_scale='slaney', ).T
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = spectrogram(
A, window_function(self.n_fft, 'hann' ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, )
SCREAMING_SNAKE_CASE : Union[str, Any] = log_spec[:, :-1]
SCREAMING_SNAKE_CASE : List[Any] = log_spec - 20.0
SCREAMING_SNAKE_CASE : Optional[Any] = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
return log_spec
def __call__( self, A, A = None, A = True, A = None, A = False, A = False, **A, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
F" with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
SCREAMING_SNAKE_CASE : List[Any] = isinstance(A, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(A, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A, np.ndarray ):
SCREAMING_SNAKE_CASE : Any = np.asarray(A, dtype=np.floataa )
elif isinstance(A, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
SCREAMING_SNAKE_CASE : int = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0], A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(A, dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
SCREAMING_SNAKE_CASE : Tuple = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
SCREAMING_SNAKE_CASE : List[Any] = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
SCREAMING_SNAKE_CASE : Tuple = np.array(A ).astype(np.floataa )
# convert into correct format for padding
SCREAMING_SNAKE_CASE : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
SCREAMING_SNAKE_CASE : Optional[Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
SCREAMING_SNAKE_CASE : Optional[int] = padded_audio_features * self.padding_value
for i in range(len(A ) ):
SCREAMING_SNAKE_CASE : Optional[int] = audio_features[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = feature
# return as BatchFeature
if return_attention_mask:
SCREAMING_SNAKE_CASE : Any = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
SCREAMING_SNAKE_CASE : Dict = {'audio_values': padded_audio_features}
SCREAMING_SNAKE_CASE : str = BatchFeature(data=A, tensor_type=A )
return encoded_inputs
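# Padding variable-length spectrograms into one batch with a 0/1 validity
# mask, as the extractor above does. A minimal numpy sketch (frames x bins):
import numpy as np

feats = [np.ones((5, 4)), np.ones((3, 4))]       # two clips: 5 and 3 frames
max_len = max(f.shape[0] for f in feats)
batch = np.zeros((len(feats), max_len, 4), dtype=np.float32)
mask = np.zeros((len(feats), max_len), dtype=np.float32)
for i, f in enumerate(feats):
    batch[i, : f.shape[0]] = f
    mask[i, : f.shape[0]] = 1.0
assert mask.sum() == 8                           # 5 + 3 valid frames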
| 28 | 0 |
def min_path_sum( grid ) -> int:
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''' )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above )
        row_above = grid[row_n]
return grid[-1][-1]
def fill_row( current_row , row_above ) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
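# Example for the DP above: the cheapest top-left -> bottom-right path in
# this 3x3 grid (moving only right or down) costs 7: 1 -> 3 -> 1 -> 1 -> 1.
if __name__ == "__main__":
    assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7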
| 100 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    """simple docstring"""
    num_nodes, num_edges = 9, 14 # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjancency = defaultdict(list )
    for nodea, nodeb, cost in edges:
        adjancency[nodea].append([nodeb, cost] )
        adjancency[nodeb].append([nodea, cost] )
    result = mst(adjancency )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
assert edge in result or reverse in result
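# The test above drives a Prim's implementation over that adjacency map. A
# compact heap-based Prim's sketch for the same `node -> [[neighbor, cost]]`
# format (just the idea, not the implementation under test):
import heapq


def prims_sketch(adjacency, start=0):
    seen, total = {start}, 0
    heap = [(cost, neighbor) for neighbor, cost in adjacency[start]]
    heapq.heapify(heap)
    while heap:
        cost, node = heapq.heappop(heap)
        if node in seen:
            continue
        seen.add(node)
        total += cost
        for neighbor, edge_cost in adjacency[node]:
            if neighbor not in seen:
                heapq.heappush(heap, (edge_cost, neighbor))
    return total


assert prims_sketch({0: [[1, 4], [2, 1]], 1: [[0, 4], [2, 2]], 2: [[0, 1], [1, 2]]}) == 3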
| 28 | 0 |
from collections import deque
class __lowercase :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = process_name # process name
SCREAMING_SNAKE_CASE_ : str = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
SCREAMING_SNAKE_CASE_ : Optional[Any] = arrival_time
SCREAMING_SNAKE_CASE_ : Tuple = burst_time # remaining burst time
SCREAMING_SNAKE_CASE_ : Optional[int] = 0 # total time of the process wait in ready queue
SCREAMING_SNAKE_CASE_ : Any = 0 # time from arrival time to completion time
class __lowercase :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = number_of_queues
# time slice of queues that round robin algorithm applied
SCREAMING_SNAKE_CASE_ : int = time_slices
# unfinished process is in this ready_queue
SCREAMING_SNAKE_CASE_ : Union[str, Any] = queue
# current time
SCREAMING_SNAKE_CASE_ : List[str] = current_time
# finished process is in this sequence queue
SCREAMING_SNAKE_CASE_ : deque[Process] = deque()
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
for i in range(len(lowerCAmelCase__ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = []
for i in range(len(lowerCAmelCase__ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for i in range(len(lowerCAmelCase__ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
return [q.burst_time for q in queue]
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : deque[Process] = deque() # sequence deque of finished process
while len(lowerCAmelCase__ ) != 0:
SCREAMING_SNAKE_CASE_ : Dict = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of current process
self.update_waiting_time(lowerCAmelCase__ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
SCREAMING_SNAKE_CASE_ : int = 0
# set the process's turnaround time because it is finished
SCREAMING_SNAKE_CASE_ : str = self.current_time - cp.arrival_time
# set the completion time
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.current_time
# add the process to queue that has finished queue
finished.append(lowerCAmelCase__ )
self.finish_queue.extend(lowerCAmelCase__ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(lowerCAmelCase__ ) ):
SCREAMING_SNAKE_CASE_ : List[Any] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(lowerCAmelCase__ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
SCREAMING_SNAKE_CASE_ : Optional[int] = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(lowerCAmelCase__ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
# set the finish time
SCREAMING_SNAKE_CASE_ : str = self.current_time
# update the process' turnaround time because it is finished
SCREAMING_SNAKE_CASE_ : Any = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(lowerCAmelCase__ )
self.finish_queue.extend(lowerCAmelCase__ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def UpperCamelCase__ ( self ):
"""simple docstring"""
for i in range(self.number_of_queues - 1 ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCAmelCase__ : Tuple =Process('P1', 0, 53)
lowerCAmelCase__ : str =Process('P2', 0, 17)
lowerCAmelCase__ : int =Process('P3', 0, 68)
lowerCAmelCase__ : List[Any] =Process('P4', 0, 24)
lowerCAmelCase__ : Tuple =3
lowerCAmelCase__ : Any =[17, 25]
lowerCAmelCase__ : Tuple =deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
lowerCAmelCase__ : Optional[int] =Process('P1', 0, 53)
lowerCAmelCase__ : List[str] =Process('P2', 0, 17)
lowerCAmelCase__ : List[Any] =Process('P3', 0, 68)
lowerCAmelCase__ : Optional[int] =Process('P4', 0, 24)
lowerCAmelCase__ : List[Any] =3
lowerCAmelCase__ : Optional[int] =[17, 25]
lowerCAmelCase__ : Optional[int] =deque([Pa, Pa, Pa, Pa])
lowerCAmelCase__ : List[Any] =MLFQ(number_of_queues, time_slices, queue, 0)
lowerCAmelCase__ : Tuple =mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
F"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 101 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799], )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)
    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799], )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png")
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy", ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png").resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25, ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy", ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png").resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
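
# For reference, the DiffEdit flow exercised above boils down to three pipeline
# calls (sketch only; assumes `pipe` and a PIL image `img` prepared as in the tests):
#
#   mask = pipe.generate_mask(image=img, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
#   latents = pipe.invert(prompt="a bowl of fruit", image=img, inpaint_strength=0.7).latents
#   edited = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents).images[0]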
| 28 | 0 |
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER on the decoded predictions and log them to text files."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Normalize a transcription to the character set used during training."""
    chars_to_ignore_regex = """[,?.!\-\;\:\"“%‘”�—’…–]"""  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
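
# Quick illustration of the normalization above (safe to run at import time):
# punctuation is stripped, text is lower-cased, and newlines collapse to spaces.
assert normalize_text("Hello, World!\nSecond line.") == "hello world second line"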
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
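
    # Typical invocation (model/dataset ids below are placeholders, not fixed values):
    #   python eval.py --model_id <org>/<wav2vec2-model> \
    #       --dataset mozilla-foundation/common_voice_7_0 --config de --split test --log_outputs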
| 102 |
'''Sum Euler's totient function phi(n) over 2 <= n <= limit with a prime sieve.'''


def solution(limit: int = 1_000_000) -> int:
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: phi[i] was never decremented
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
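    # Sanity check on a small limit: phi(2..8) = 1, 2, 2, 4, 2, 6, 4, which sums to 21.
    assert solution(8) == 21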
| 28 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features: Union[
            BatchFeature, List[BatchFeature], Dict[str, BatchFeature], Dict[str, List[BatchFeature]], List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        """Pad the main model input (and attention mask) up to a common length in a batch."""
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}")

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object.")

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, truncation=truncation, )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i], max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad a single example (and its attention mask) up to `max_length`."""
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference))
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value)
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0))
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value)
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        """Truncate a single example (and its attention mask) down to `max_length`."""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        """Resolve the user-facing `padding` argument into a PaddingStrategy."""
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined")

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.")

        return padding_strategy
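
# Usage sketch for the padder above (illustrative values; any concrete subclass
# such as Wav2Vec2FeatureExtractor inherits this `pad`):
#
#   fe = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16_000, padding_value=0.0)
#   batch = fe.pad({"input_values": [np.zeros(8), np.zeros(5)]}, padding=True, return_tensors="np")
#   # batch["input_values"].shape == (2, 8); the shorter array is right-padded with 0.0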
| 103 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A )
SCREAMING_SNAKE_CASE : int = tokenizer.encode(
'sequence builders', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(
'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.'
SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A, A )
SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A, A )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A, A )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE : Optional[int] = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A )
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence'
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence'
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Tuple = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A, A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), )
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ):
SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['trim_offsets'], A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}"
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
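
# Note on the "\u0120" strings used throughout these tests: byte-level BPE maps a
# leading space to the visible marker "Ġ" (U+0120), so " lowest" is stored as
# "\u0120lowest" in the vocab. A quick check in a REPL:
#
#   >>> "\u0120lowest"
#   'Ġlowest'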
| 28 | 0 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation link text from a Google Scholar lookup result."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
UpperCamelCase = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
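
# Scholar markup changes frequently, so a defensive variant of the scraper can be
# useful. This helper is hypothetical (not part of the original script) but uses
# the same selectors as get_citation above.
def get_citation_safe(base_url: str, params: dict) -> str:
    """Like get_citation, but returns an empty string instead of raising if the markup changes."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    if div is None:
        return ""
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text() if len(anchors) > 2 else ""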
| 104 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="gelu", projection_dim=32, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_default_ddim(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 28 | 0 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name: str, hf_config, downstream_dict: dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name: str, hf_config, downstream_dict: dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name: str, hf_config, downstream_dict: dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Copy/paste/tweak an s3prl downstream checkpoint into the transformers design."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
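
    # Example invocation (all paths/ids are placeholders):
    #   python convert_s3prl_checkpoint.py \
    #       --base_model_name microsoft/unispeech-sat-base \
    #       --config_path ./config.json \
    #       --checkpoint_path ./s3prl_downstream.ckpt \
    #       --model_dump_path ./converted_model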
| 105 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Forward images to the image processor and text to the character tokenizer, merging the results."""
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        """Convert (char, bpe, wordpiece) logits into strings, keeping the best-scoring head per sample."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        """Greedy-decode one head's logits and score each string by the cumulative max probability."""
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores
    def char_decode(self, sequences):
        """Decode character-head token ids, stripping the spaces the tokenizer inserts."""
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        """Decode BPE-head token ids with the GPT-2 tokenizer."""
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        """Decode wordpiece-head token ids, stripping the spaces the tokenizer inserts."""
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
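
# Decode usage sketch (tensor shapes are assumptions: each head emits logits of
# shape (batch, max_len, head_vocab)):
#
#   outputs = processor.batch_decode((char_logits, bpe_logits, wp_logits))
#   outputs["generated_text"]  # best string per image, chosen across the three heads
#   outputs["scores"]          # matching cumulative-probability confidence scores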
| 28 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=10_000, decoder_layers=6, decoder_ffn_dim=2_048, decoder_attention_heads=4, decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1_024, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
| 106 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger("transformers.models.speecht5")
def load_weights( checkpoint ,hf_model ,config ):
    """simple docstring"""
    hf_model.apply_weight_norm()
    # Module names below follow the SpeechTaHifiGan implementation
    # (conv_pre / upsampler / resblocks / conv_post).
    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path ,stats_path ,pytorch_dump_folder_path ,config_path=None ,repo_id=None ,):
    """simple docstring"""
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint['model']['generator'] ,model ,config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    # Attach the normalization statistics the vocoder expects.
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('Pushing to the hub...' )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
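    # Example invocation (added; file names are placeholders):
    #   python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
    #       --pytorch_dump_folder_path ./speecht5_hifigan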
| 28 | 0 |
'''simple docstring'''
import math
def is_prime( number : int ):
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
def next_prime( value , factor=1 , **kwargs ):
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **kwargs )
    return value
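if __name__ == "__main__":
    # Quick sanity checks (added for illustration; not in the original snippet).
    assert is_prime(13 )
    assert next_prime(6 ) == 7
    assert next_prime(13 ) == 17  # 13 is already prime, so the search restarts at 14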
| 107 |
'''simple docstring'''
from typing import Any
class Node :
    '''simple docstring'''
    def __init__( self, data ):
        '''simple docstring'''
        self.data = data
        self.next = None
    def __repr__( self ):
        '''simple docstring'''
        return F"Node({self.data})"
class LinkedList :
    '''simple docstring'''
    def __init__( self ):
        '''simple docstring'''
        self.head = None
    def __iter__( self ):
        '''simple docstring'''
        node = self.head
        while node:
            yield node.data
            node = node.next
def __len__( self ):
'''simple docstring'''
return sum(1 for _ in self )
    def __repr__( self ):
        '''simple docstring'''
        return "->".join([str(item ) for item in self] )
    def __getitem__( self, index ):
        '''simple docstring'''
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None
    def __setitem__( self, index, data ):
        '''simple docstring'''
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
    def insert_tail( self, data ):
        '''simple docstring'''
        self.insert_nth(len(self ), data )
    def insert_head( self, data ):
        '''simple docstring'''
        self.insert_nth(0, data )
    def insert_nth( self, index, data ):
        '''simple docstring'''
        if not 0 <= index <= len(self ):
            raise IndexError('list index out of range' )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list( self ):  # print every node data
        '''simple docstring'''
        print(self )
    def delete_head( self ):
        '''simple docstring'''
        return self.delete_nth(0 )
    def delete_tail( self ):  # delete from tail
        '''simple docstring'''
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self, index = 0 ):
        '''simple docstring'''
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError('List index out of range.' )
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty( self ):
        '''simple docstring'''
        return self.head is None
    def reverse( self ):
        '''simple docstring'''
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list():
    """simple docstring"""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list ) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10 ):
        assert len(linked_list ) == i
        linked_list.insert_nth(i ,i + 1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 ,11 ) )
    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(0 ,12 ) )
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list ) == 9
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 ,10 ) )
    assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True
    for i in range(0 ,9 ):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True
    linked_list.reverse()
    assert str(linked_list ) == "->".join(str(i ) for i in range(-8 ,1 ) )
def test_singly_linked_list_2():
    """simple docstring"""
    test_input = [
        -9,
        100,
        Node(77345112 ),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10 ),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i )
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!' ) )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list )
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    """simple docstring"""
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
    linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
    print('\nPrint list:' )
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
    linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nDelete head' )
    linked_list.delete_head()
    print('Delete tail' )
    linked_list.delete_tail()
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nReverse linked list' )
    linked_list.reverse()
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nString representation of linked list:' )
    print(linked_list )
    print('\nReading/changing Node data using indexing:' )
    print(f"Element at Position 1: {linked_list[1]}" )
    linked_list[1] = input('Enter New Value: ' ).strip()
    print('New list:' )
    print(linked_list )
    print(f"length of linked_list is : {len(linked_list )}" )
if __name__ == "__main__":
main()
| 28 | 0 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any] ) -> None:
    create_state_space_tree(sequence , [] , 0 )
def create_state_space_tree(sequence: list[Any] , current_subsequence: list[Any] , index: int ) -> None:
    if index == len(sequence ):
        print(current_subsequence )
        return
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)
    seq.clear()
    seq.extend(['''A''', '''B''', '''C'''])
    generate_all_subsequences(seq) | 108 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester ( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True, ):
        '''simple docstring'''
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self, image_inputs, batched=False ):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values, key=lambda item : item[0] )[0]
            expected_width = max(expected_values, key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = YolosImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A, 'image_mean' ) )
self.assertTrue(hasattr(A, 'image_std' ) )
self.assertTrue(hasattr(A, 'do_normalize' ) )
self.assertTrue(hasattr(A, 'do_resize' ) )
self.assertTrue(hasattr(A, 'size' ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1_333} )
self.assertEqual(image_processor.do_pad, A )
SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=A )
self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A, Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(A, batched=A )
SCREAMING_SNAKE_CASE : Tuple = image_processing(A, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, numpify=A )
for image in image_inputs:
self.assertIsInstance(A, np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(A, return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(A, batched=A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A )
for image in image_inputs:
self.assertIsInstance(A, torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processing(A, return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(A, batched=A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(do_resize=A, do_normalize=A, do_rescale=A )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A )
for image in image_inputs:
self.assertIsInstance(A, torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
SCREAMING_SNAKE_CASE : List[str] = image_processing_a.pad(A, return_tensors='pt' )
SCREAMING_SNAKE_CASE : Dict = image_processing_a(A, return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r' ) as f:
SCREAMING_SNAKE_CASE : Dict = json.loads(f.read() )
SCREAMING_SNAKE_CASE : Any = {'image_id': 39_769, 'annotations': target}
# encode them
SCREAMING_SNAKE_CASE : Any = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
SCREAMING_SNAKE_CASE : int = image_processing(images=A, annotations=A, return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape, A )
        SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) )
# verify area
        SCREAMING_SNAKE_CASE : Tuple = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) )
# verify boxes
SCREAMING_SNAKE_CASE : str = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape, A )
        SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) )
# verify class_labels
SCREAMING_SNAKE_CASE : int = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) )
# verify orig_size
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) )
# verify size
SCREAMING_SNAKE_CASE : str = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r' ) as f:
SCREAMING_SNAKE_CASE : int = json.loads(f.read() )
SCREAMING_SNAKE_CASE : List[Any] = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
SCREAMING_SNAKE_CASE : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
SCREAMING_SNAKE_CASE : int = YolosImageProcessor(format='coco_panoptic' )
SCREAMING_SNAKE_CASE : str = image_processing(images=A, annotations=A, masks_path=A, return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape, A )
        SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) )
# verify area
        SCREAMING_SNAKE_CASE : Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) )
# verify boxes
SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape, A )
        SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) )
# verify class_labels
SCREAMING_SNAKE_CASE : Any = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) )
# verify masks
SCREAMING_SNAKE_CASE : Optional[int] = 822_873
self.assertEqual(encoding['labels'][0]['masks'].sum().item(), A )
# verify orig_size
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) )
# verify size
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
| 28 | 0 |
'''simple docstring'''
import math
import random
def sigmoid_function( value: float , deriv: bool = False ) -> float:
    '''simple docstring'''
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation( expected: int , number_propagations: int ) -> float:
    '''simple docstring'''
    # Random weight
    weight = float(2 * (random.randint(1 , 100 )) - 1 )
    for _ in range(number_propagations ):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_a
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_a , True )
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_a * 100
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
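    # Reproducible sketch (added): seeding fixes the random initial weight, and the
    # result approaches the expected value as the propagation count grows, e.g.
    #   random.seed(0); forward_propagation(50, 1_000_000)  # ~50.0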
| 109 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
UpperCamelCase_ = logging.get_logger(__name__)
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets( datasets: List[DatasetType] ,probabilities: Optional[List[float]] = None ,seed: Optional[int] = None ,info: Optional[DatasetInfo] = None ,split: Optional[NamedSplit] = None ,stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" ,) -> DatasetType:
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.' )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset ,(Dataset, IterableDataset) ):
            if isinstance(dataset ,(DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']" )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}." )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset ,Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset ,dataset_type ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets ,probabilities ,seed ,info=info ,split=split ,stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets ,probabilities ,seed ,info=info ,split=split ,stopping_strategy=stopping_strategy )
def concatenate_datasets( dsets: List[DatasetType] ,info: Optional[DatasetInfo] = None ,split: Optional[NamedSplit] = None ,axis: int = 0 ,) -> DatasetType:
    """simple docstring"""
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.' )
    for i, dataset in enumerate(dsets ):
        if not isinstance(dataset ,(Dataset, IterableDataset) ):
            if isinstance(dataset ,(DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset )}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset ) )}']" )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}." )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset ,Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset ,dataset_type ):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets ,info=info ,split=split ,axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets ,info=info ,split=split ,axis=axis )
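# Usage sketch (added for illustration, via the public `datasets` API):
#
#   from datasets import Dataset
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   interleave_datasets([d1, d2])["a"]        # [0, 10, 1, 11, 2, 12]
#   interleave_datasets([d1, d2], probabilities=[0.8, 0.2], seed=42)
#   concatenate_datasets([d1, d2])["a"]       # [0, 1, 2, 10, 11, 12]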
| 28 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict ):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
        'decoder.output_projection.weight',
    ]
    for k in ignore_keys:
        state_dict.pop(k ,None )
def make_linear_from_emb(emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size ,emb_size ,bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(checkpoint_path ,hf_config_path="facebook/mbart-large-en-ro" ,finetuned=False ,mbart_50=False ):
    state_dict = torch.load(checkpoint_path ,map_location='cpu' )['model']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path ,vocab_size=vocab_size )
    if mbart_50 and finetuned:
        mbart_config.activation_function = 'relu'
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is a mBART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
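    # Example invocation (added; paths are placeholders):
    #   python convert_mbart_checkpoint.py model.pt ./mbart-converted \
    #       --hf_config facebook/mbart-large-cc25 --finetuned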
| 110 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester ( ConfigTester ):
    '''simple docstring'''
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config, 'hidden_sizes' ) )
        self.parent.assertTrue(hasattr(config, 'neck_hidden_sizes' ) )
        self.parent.assertTrue(hasattr(config, 'num_attention_heads' ) )
class MobileViTModelTester :
    '''simple docstring'''
    def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
        '''simple docstring'''
        return MobileViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
    def create_and_check_model( self, config, pixel_values, labels, pixel_labels ):
        '''simple docstring'''
        model = MobileViTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
    def create_and_check_for_image_classification( self, config, pixel_values, labels, pixel_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values, labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation( self, config, pixel_values, labels, pixel_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        result = model(pixel_values, labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MobileViTModel,
            'image-classification': MobileViTForImageClassification,
            'image-segmentation': MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = MobileViTModelTester(self )
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A )
SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ['pixel_values']
self.assertListEqual(arg_names[:1], A )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
def check_hidden_states_output(A, A, A ):
SCREAMING_SNAKE_CASE : Any = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : List[str] = 5
self.assertEqual(len(A ), A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE : int = 2
for i in range(len(A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(A, A, A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
check_hidden_states_output(A, A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : int = MobileViTModel.from_pretrained(A )
self.assertIsNotNone(A )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(A )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : Dict = prepare_img()
SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**A )
# verify the logits
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, A )
        SCREAMING_SNAKE_CASE : int = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = model.to(A )
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**A )
SCREAMING_SNAKE_CASE : List[str] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, A )
        SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ], device=A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : List[str] = model.to(A )
SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**A )
SCREAMING_SNAKE_CASE : int = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE : Dict = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, A )
SCREAMING_SNAKE_CASE : Tuple = image_processor.post_process_semantic_segmentation(outputs=A )
SCREAMING_SNAKE_CASE : Any = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, A )
| 28 | 0 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.speecht5""")
MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def should_ignore( name , ignore_keys ):
    '''simple docstring'''
    for key in ignore_keys:
        if key.endswith(""".*""" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split(""".*.""" )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights( fairseq_dict , hf_model , task ):
    '''simple docstring'''
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"""Unsupported task: {task}""" )
    for name, value in fairseq_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(f"""{name} was ignored""" )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_encoder , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix , suffix = key.split(""".*.""" )
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "running_mean" in name:
                        weight_type = 'running_mean'
                    elif "running_var" in name:
                        weight_type = 'running_var'
                    elif "num_batches_tracked" in name:
                        weight_type = 'num_batches_tracked'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    """Copy/paste/tweak a fairseq SpeechT5 checkpoint into the transformers design."""
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
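

# Example invocation (script name and paths are placeholders, not part of the original release):
#   python convert_speechta_checkpoint.py --task t2s --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model --pytorch_dump_folder_path ./speecht5_tts_hf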
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 537 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" DistilBERT tokenizer, backed by the HuggingFace tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
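

# Minimal usage sketch (token ids shown are illustrative of the distilbert-base-uncased vocab):
#   tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   tokenizer("Hello world")["input_ids"]  # e.g. [101, 7592, 2088, 102] -> [CLS] hello world [SEP]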
| 28 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]) -> None:
        # adlist[0] is the root; each node stores its character, children, fail link and matched keywords.
        self.adlist: list[dict] = []
        self.adlist.append({"value": "", "next_states": [], "fail_state": 0, "output": []})

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict of keywords and lists of their occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
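

# Usage sketch:
#   auto = Automaton(["what", "hat", "ver", "er"])
#   auto.search_in("whatever, err ... , wherever")
#   -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}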
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 28 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
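
    # Note: registering the _LazyModule in sys.modules means e.g. GroupViTModel is only
    # imported (pulling in torch) the first time the attribute is actually accessed.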
| 475 |
'''simple docstring'''
class Node:
    """A binary search tree node."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal: left subtree, node, right subtree.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build a BST from the input, then read it back in order.
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    res = []
    inorder(root, res)
    return res
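

# Example: tree_sort([10, 1, 3, 2, 9, 14, 13]) returns [1, 2, 3, 9, 10, 13, 14].
# Worst case is O(n^2) on already-sorted input, since the BST degenerates into a linked list.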
if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 28 | 0 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 392 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]

    return values
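

# Usage sketch (the kwarg name and version numbers are illustrative):
#   def step(self, **kwargs):
#       scale = deprecate("scale", "1.0.0", "Use `guidance_scale` instead.", take_from=kwargs)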
| 28 | 0 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': NystromformerModel,
'''fill-mask''': NystromformerForMaskedLM,
'''question-answering''': NystromformerForQuestionAnswering,
'''text-classification''': NystromformerForSequenceClassification,
'''token-classification''': NystromformerForTokenClassification,
'''zero-shot''': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_headmasking = False
    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
| 652 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 | 0 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
| 443 |
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-based position of the most significant set bit (0 for 0)."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1
    return position
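

# Example: get_highest_set_bit_position(19) == 5, since 19 == 0b10011 and bit 5 is the highest set bit.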
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 0 |
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor (explicit Euler step), then trapezoidal corrector (Heun's method).
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y_predict)
        )
        x += step_size

    return y
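

# Usage sketch: integrate y' = y with y(0) = 1 over [0, 1]; y[-1] approximates e ~ 2.71828.
#   y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)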
if __name__ == "__main__":
import doctest
doctest.testmod()
| 639 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """Extract the hidden states of the base transformer, to be used as features in downstream tasks."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first available tensor (logits or last_hidden_state).
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
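

# Usage sketch (model name is illustrative):
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("This is a test")  # nested list, shape [1, seq_len, hidden_size]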
| 28 | 0 |
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of n in non-decreasing order (trial division)."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
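

# Example: prime_factors(100) == [2, 2, 5, 5] and prime_factors(97) == [97].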
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 |
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable: the loop always returns once input stops
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    # Prints one tree level per line.
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
    node = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 28 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
class __lowercase ( _A ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['''input_ids''', '''attention_mask''']
lowercase = RobertaTokenizer
def __init__( self : List[str] , __lowerCamelCase : str=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : int="replace" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : List[str]="</s>" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Optional[int]="<pad>" , __lowerCamelCase : Any="<mask>" , __lowerCamelCase : int=False , __lowerCamelCase : Tuple=True , **__lowerCamelCase : int , ) -> Dict:
'''simple docstring'''
super().__init__(
__lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , )
lowercase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __lowerCamelCase ) != add_prefix_space:
lowercase = getattr(__lowerCamelCase , pre_tok_state.pop('''type''' ) )
lowercase = add_prefix_space
lowercase = pre_tok_class(**__lowerCamelCase )
lowercase = add_prefix_space
        # Mirror the same settings on the serialized post-processor component.
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists '''sep''' and '''cls''' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state['''sep'''] = tuple(state['''sep'''] )
            if "cls" in state:
                state['''cls'''] = tuple(state['''cls'''] )
            changes_to_apply = False
            if state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True
            if state.get('''trim_offsets''' , trim_offsets ) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('''type''' ) )
                setattr(self.backend_tokenizer , tokenizer_component , component_class(**state ) )
    @property
    def mask_token( self ) -> str:
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ) -> None:
        '''simple docstring'''
        # The mask token behaves like a normal word: it keeps the space before it.
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
def __a ( self : Union[str, Any] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
lowercase = kwargs.get('''is_split_into_words''' , __lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def __a ( self : Optional[int] , *__lowerCamelCase : Tuple , **__lowerCamelCase : List[Any] ) -> List[str]:
'''simple docstring'''
lowercase = kwargs.get('''is_split_into_words''' , __lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def __a ( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Any = None ) -> int:
'''simple docstring'''
lowercase = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
def __a ( self : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str=None ) -> Optional[int]:
'''simple docstring'''
lowercase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __a ( self : List[str] , __lowerCamelCase : Any , __lowerCamelCase : int = None ) -> str:
'''simple docstring'''
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
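    # Note (added): RoBERTa models do not use token type ids, so the mask returned
    # above is all zeros even for sequence pairs.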
| 604 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    '''simple docstring'''
    def __init__( self, device = "cpu", clip_model = "openai/clip-vit-large-patch14" ):
        '''simple docstring'''
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model )
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std )
        self.resize = torchvision.transforms.Resize(224 )
        self.center_crop = torchvision.transforms.CenterCrop(224 )
    def preprocess_img( self, images ):
        '''simple docstring'''
        images = self.resize(images )
        images = self.center_crop(images )
        images = self.normalize(images )
        return images
    def __call__( self, text=None, images=None, **kwargs ):
        '''simple docstring'''
        encoding = self.tokenizer(text=text, **kwargs )
        encoding['''pixel_values'''] = self.preprocess_img(images )
        encoding = {key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
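    # Unlike the stock CLIP processor, every transform above is a differentiable
    # torch op, so gradients from the CLIP loss can flow back into the input image.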
class VQGAN_CLIP( nn.Module ):
'''simple docstring'''
    def __init__( self, iterations=10, lr=0.01, vqgan=None, vqgan_config=None, vqgan_checkpoint=None, clip=None, clip_preprocessor=None, device=None, log=False, save_vector=True, return_val="image", quantize=True, save_intermediate=False, show_intermediate=False, make_grid=False, ):
        '''simple docstring'''
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint )
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
        self.clip.to(self.device )
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device )
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation( self, input_path=None, output_path=None, total_duration=5, extend_frames=True ):
        '''simple docstring'''
        images = []
        if output_path is None:
            output_path = './animation.gif'
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + '/*' ) )
        if not len(paths ):
            raise ValueError(
                'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
                ' function?)' )
        if len(paths ) == 1:
            print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
        frame_duration = total_duration / len(paths )
        durations = [frame_duration] * len(paths )
        if extend_frames:
            # hold the first and last frames a little longer
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith('.png' ):
                images.append(imageio.imread(file_name ) )
        imageio.mimsave(output_path, images, duration=durations )
        print(F"gif saved to {output_path}" )
    def _get_latent( self, path=None, img=None ):
        '''simple docstring'''
        if not (path or img):
            raise ValueError('Input either path or tensor' )
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path ), target_image_size=256 ).to(self.device )
        img = preprocess_vqgan(img )
        z, *_ = self.vqgan.encode(img )
        return z
    def _add_vector( self, transform_vector ):
        '''simple docstring'''
        # Offset the current latent, optionally re-quantize, and decode to pixels.
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent )
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q )
    def _get_clip_similarity( self, prompts, image, weights=None ):
        '''simple docstring'''
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors='pt', padding=True )
        clip_outputs = self.clip(**clip_inputs )
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss( self, pos_prompts, neg_prompts, image ):
        '''simple docstring'''
        pos_logits = self._get_clip_similarity(pos_prompts['prompts'], image, weights=(1 / pos_prompts['weights']) )
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts['prompts'], image, weights=neg_prompts['weights'] )
        else:
            neg_logits = torch.tensor([1], device=self.device )
        # reward similarity to the positive prompts, penalize similarity to the negative ones
        loss = -torch.log(pos_logits ) + torch.log(neg_logits )
        return loss
    def _optimize_CLIP( self, original_img, pos_prompts, neg_prompts ):
        '''simple docstring'''
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device )
        optim = torch.optim.Adam([vector], lr=self.lr )
        for i in range(self.iterations ):
            optim.zero_grad()
            transformed_img = self._add_vector(vector )
            processed_img = loop_post_process(transformed_img )
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img )
            print('CLIP loss', clip_loss )
            if self.log:
                wandb.log({'CLIP Loss': clip_loss} )
            clip_loss.backward(retain_graph=True )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector
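    # Each step yields either the decoded PIL image or the raw latent offset,
    # letting callers stream intermediate results (see `generate` below).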
    def _init_logging( self, positive_prompts, negative_prompts, image_path ):
        '''simple docstring'''
        wandb.init(reinit=True, project='face-editor' )
        wandb.config.update({'Positive Prompts': positive_prompts} )
        wandb.config.update({'Negative Prompts': negative_prompts} )
        wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
        if image_path:
            image = Image.open(image_path )
            image = image.resize((256, 256) )
            # `wandb.log` expects a dict, not positional (key, value) arguments.
            wandb.log({'Original Image': wandb.Image(image )} )
    def process_prompts( self, prompts ):
        '''simple docstring'''
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str ):
            prompts = [prompt.strip() for prompt in prompts.split('|' )]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list) ):
                processed_prompt = prompt[0]
                weight = float(prompt[1] )
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(':' )
                weight = float(weight )
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt )
            weights.append(weight )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device ),
        }
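    # Example (added for clarity): "a photo of a dog:2|a cat" becomes
    # {"prompts": ["a photo of a dog", "a cat"], "weights": tensor([2.0, 1.0])}.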
    def generate( self, pos_prompts, neg_prompts=None, image_path=None, show_intermediate=True, save_intermediate=False, show_final=True, save_final=True, save_path=None, ):
        '''simple docstring'''
        if image_path:
            self.latent = self._get_latent(image_path )
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device )
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path )
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts )
        neg_prompts = self.process_prompts(neg_prompts )
        if save_final and save_path is None:
            save_path = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'] ) )
            if not os.path.exists(save_path ):
                os.makedirs(save_path )
            else:
                save_path = save_path + '_' + get_timestamp()
                os.makedirs(save_path )
            self.save_path = save_path
        original_img = self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print('Original Image' )
            show_pil(custom_to_pil(original_img ) )
        original_img = loop_post_process(original_img )
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts ) ):
            if show_intermediate:
                show_pil(transformed_img )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png" ) )
            if self.log:
                wandb.log({'Image': wandb.Image(transformed_img )} )
        if show_final:
            show_pil(transformed_img )
        if save_final:
            transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png" ) )
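    # Hypothetical usage (illustrative only; prompt weights use the "text:weight" syntax
    # handled by `process_prompts` above):
    #     editor = VQGAN_CLIP()
    #     editor.generate('a smiling face', neg_prompts='glasses:0.5|beard')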
| 28 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("""fixtures""")
class ImageProcessorUtilTester(unittest.TestCase ):
'''simple docstring'''
    def test_cached_files_are_used_when_internet_is_down( self ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        image_processor = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' , return_value=response_mock ) as mock_head:
            image_processor = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url( self ):
snake_case__ = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
    def test_image_processor_from_pretrained_subfolder( self ):
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
        config = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
        self.assertIsNotNone(config )
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase ):
'''simple docstring'''
    @classmethod
    def setUpClass( cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
    def test_push_to_hub( self ):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''test-image-processor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id='''test-image-processor''' , push_to_hub=True , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
    def test_push_to_hub_in_organization( self ):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=True , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
    def test_push_to_hub_dynamic_image_processor( self ):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
        new_image_processor = AutoImageProcessor.from_pretrained(
            F"""{USER}/test-dynamic-image-processor""" , trust_remote_code=True )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
| 33 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel( ModelMixin ):
    '''simple docstring'''
    def __init__( self, controlnets ):
        '''simple docstring'''
        super().__init__()
        self.nets = nn.ModuleList(controlnets )
    def forward( self, sample, timestep, encoder_hidden_states, controlnet_cond, conditioning_scale, class_labels = None, timestep_cond = None, attention_mask = None, cross_attention_kwargs = None, guess_mode = False, return_dict = True, ):
        '''simple docstring'''
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets ) ):
            down_samples, mid_sample = controlnet(
                sample, timestep, encoder_hidden_states, image, scale, class_labels, timestep_cond, attention_mask, cross_attention_kwargs, guess_mode, return_dict, )
            # merge samples: residuals from each controlnet are summed element-wise
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples )
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def save_pretrained( self, save_directory, is_main_process = True, save_function = None, safe_serialization = False, variant = None, ):
        '''simple docstring'''
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant, )
            idx += 1
            model_path_to_save = model_path_to_save + F"_{idx}"
    @classmethod
    def from_pretrained( cls, pretrained_model_path, **kwargs ):
        '''simple docstring'''
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + F"_{idx}"
        logger.info(F"{len(controlnets )} controlnets loaded from {pretrained_model_path}." )
        if len(controlnets ) == 0:
            raise ValueError(
                F"No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + '_0'}." )
        return cls(controlnets )
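    # Loading expects sibling folders `<path>`, `<path>_1`, `<path>_2`, ...: the same
    # layout that `save_pretrained` above writes out.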
| 28 | 0 |
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum ):
    """simple docstring"""
    ALL_CHECKS = '''all_checks'''
    BASIC_CHECKS = '''basic_checks'''
    NO_CHECKS = '''no_checks'''
class ChecksumVerificationException(Exception ):
    """simple docstring"""
class UnexpectedDownloadedFile(ChecksumVerificationException ):
    """simple docstring"""
class ExpectedMoreDownloadedFiles(ChecksumVerificationException ):
    """simple docstring"""
class NonMatchingChecksumError(ChecksumVerificationException ):
    """simple docstring"""
def verify_checksums(expected_checksums : Optional[dict] , recorded_checksums : dict , verification_name=None ):
    if expected_checksums is None:
        logger.info('Unable to verify checksums.' )
        return
    if len(set(expected_checksums ) - set(recorded_checksums ) ) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums ) - set(recorded_checksums ) ) )
    if len(set(recorded_checksums ) - set(expected_checksums ) ) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums ) - set(expected_checksums ) ) )
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = ' for ' + verification_name if verification_name is not None else ''
    if len(bad_urls ) > 0:
        raise NonMatchingChecksumError(
            f'''Checksums didn\'t match{for_verification_name}:\n'''
            f'''{bad_urls}\n'''
            'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error' )
    logger.info('All the checksums matched successfully' + for_verification_name )
class SplitsVerificationException(Exception ):
    """simple docstring"""
class UnexpectedSplits(SplitsVerificationException ):
    """simple docstring"""
class ExpectedMoreSplits(SplitsVerificationException ):
    """simple docstring"""
class NonMatchingSplitsSizesError(SplitsVerificationException ):
    """simple docstring"""
def verify_splits(expected_splits : Optional[dict] , recorded_splits : dict ):
    if expected_splits is None:
        logger.info('Unable to verify splits sizes.' )
        return
    if len(set(expected_splits ) - set(recorded_splits ) ) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits ) - set(recorded_splits ) ) )
    if len(set(recorded_splits ) - set(expected_splits ) ) > 0:
        raise UnexpectedSplits(str(set(recorded_splits ) - set(expected_splits ) ) )
    bad_splits = [
        {'expected': expected_splits[name], 'recorded': recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits ) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits ) )
    logger.info('All the splits matched successfully.' )
def get_size_checksum_dict(path : str , record_checksum : bool = True ):
    if record_checksum:
        m = sha256()
        with open(path , 'rb' ) as f:
            # hash the file in 1 MiB chunks to bound memory use
            for chunk in iter(lambda: f.read(1 << 20 ) , b'' ):
                m.update(chunk )
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path ), "checksum": checksum}
def is_small_dataset(dataset_size : Optional[int] ):
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
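# Hypothetical usage sketch (added; the literal values below are illustrative only):
# verify_checksums(
#     {"http://x/a": {"num_bytes": 10, "checksum": "ab"}},
#     {"http://x/a": {"num_bytes": 10, "checksum": "ab"}},
# )  # passes silently; a mismatching checksum raises NonMatchingChecksumError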
| 478 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor( SequenceFeatureExtractor ):
    '''simple docstring'''
    model_input_names = ['''audio_values''', '''audio_mask''']
    def __init__( self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs, )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney', ).T
    def _np_extract_fbank_features( self, waveform ):
        '''simple docstring'''
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, 'hann' ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
        return log_spec
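    # Note (added): the dB-scaled spectrogram is shifted and rescaled into [-1.0, 1.0]
    # via clip((x - 20) / 40, -2, 0) + 1 before being patched by the model.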
    def __call__( self, raw_speech, return_tensors = None, return_attention_mask = True, sampling_rate = None, resample = False, mask_audio = False, **kwargs, ):
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    F" with {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray ):
            raw_speech = np.asarray(raw_speech, dtype=np.float32 )
        elif isinstance(raw_speech, np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list ):
            audio_features = [np.asarray(feature, dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            audio_feature = audio_features[i]
            padded_audio_features[i, :, : audio_feature.shape[0], :] = audio_feature
        # return as BatchFeature
        if return_attention_mask:
            data = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
        else:
            data = {'''audio_values''': padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors )
        return encoded_inputs
| 28 | 0 |
def gcd( a, b ) -> int:
    '''simple docstring'''
    # Euclid's algorithm: repeatedly replace (a, b) by (b mod a, a).
    while a != 0:
        a, b = b % a, a
    return b
def find_mod_inverse( a, m ) -> int:
    '''simple docstring'''
    # Extended Euclidean algorithm; a modular inverse exists only when gcd(a, m) == 1.
    if gcd(a, m ) != 1:
        msg = f"""mod inverse of {a!r} and {m!r} does not exist"""
        raise ValueError(msg )
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
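# Quick sanity checks for the helpers above (added; not part of the original file):
if __name__ == "__main__":
    assert gcd(24, 32) == 8
    assert find_mod_inverse(7, 26) == 15  # 7 * 15 == 105 == 4 * 26 + 1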
| 537 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
"""simple docstring"""
    node_count, edge_count = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list )
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost] )
        adjacency[node2].append([node1, cost] )
    result = mst(adjacency )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
        assert edge in result or reverse in result
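    # Note (added): the eight expected edges form the minimum spanning tree of this
    # classic 9-node example graph, with total weight 1+2+2+4+4+7+8+9 = 37.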
| 28 | 0 |
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    node_count, edge_count = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list )
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost] )
        adjacency[node2].append([node1, cost] )
    result = mst(adjacency )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
        assert edge in result or reverse in result
| 29 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        # scheduler flags use the standard Stable Diffusion DDIM settings
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_zero=False, )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'inverse_scheduler': inverse_scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self, device, seed=0 ):
        '''simple docstring'''
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed ) ).to(device )
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'a dog and a newt',
            'mask_image': mask,
            'image_latents': latents,
            'generator': generator,
            'num_inference_steps': 2,
            'inpaint_strength': 1.0,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def get_dummy_mask_inputs( self, device, seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0, 2, 3, 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'image': image,
            'source_prompt': 'a cat and a frog',
            'target_prompt': 'a dog and a newt',
            'generator': generator,
            'num_inference_steps': 2,
            'num_maps_per_mask': 2,
            'mask_encode_strength': 1.0,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def get_dummy_inversion_inputs( self, device, seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0, 2, 3, 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'image': image,
            'prompt': 'a cat and a frog',
            'generator': generator,
            'num_inference_steps': 2,
            'inpaint_strength': 1.0,
            'guidance_scale': 6.0,
            'decode_latents': True,
            'output_type': 'numpy',
        }
        return inputs
    def test_save_load_optional_components( self ):
        '''simple docstring'''
        if not hasattr(self.pipeline_class, '_optional_components' ):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None )
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
        pipe_loaded.to(torch_device )
        pipe_loaded.set_progress_bar_config(disable=None )
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component ) is None, F"`{optional_component}` did not stay set to None after loading.", )
        inputs = self.get_dummy_inputs(torch_device )
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(output - output_loaded ).max()
        self.assertLess(max_diff, 1E-4 )
    def test_mask( self ):
        '''simple docstring'''
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_mask_inputs(device )
        mask = pipe.generate_mask(**inputs )
        mask_slice = mask[0, -3:, -3:]
        self.assertEqual(mask.shape, (1, 16, 16) )
        expected_slice = np.array([0] * 9 )
        max_diff = np.abs(mask_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff, 1E-3 )
        self.assertEqual(mask[0, -3, -4], 0 )
    def test_inversion( self ):
        '''simple docstring'''
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inversion_inputs(device )
        image = pipe.invert(**inputs ).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3) )
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799], )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff, 1E-3 )
    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
    def test_inversion_dpm( self ):
        '''simple docstring'''
        device = 'cpu'
        components = self.get_dummy_components()
        scheduler_args = {'beta_start': 0.00085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
        components['scheduler'] = DPMSolverMultistepScheduler(**scheduler_args )
        components['inverse_scheduler'] = DPMSolverMultistepInverseScheduler(**scheduler_args )
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inversion_inputs(device )
        image = pipe.invert(**inputs ).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3) )
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799], )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff, 1E-3 )
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        raw_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
        raw_image = raw_image.convert('RGB' ).resize((768, 768) )
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full( self ):
        '''simple docstring'''
        generator = torch.manual_seed(0 )
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1', safety_checker=None, torch_dtype=torch.float16 )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        source_prompt = 'a bowl of fruit'
        target_prompt = 'a bowl of pears'
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type='numpy', ).images[0]
        expected_image = (
            np.array(
                load_image(
                    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                    '/diffedit/pears.png' ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5E-1
    def test_stable_diffusion_diffedit_dpm( self ):
        '''simple docstring'''
        generator = torch.manual_seed(0 )
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1', safety_checker=None, torch_dtype=torch.float16 )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        source_prompt = 'a bowl of fruit'
        target_prompt = 'a bowl of pears'
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25, ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0]
        expected_image = (
            np.array(
                load_image(
                    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                    '/diffedit/pears.png' ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5E-1
| 28 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        # Keep the backend normalizer in sync with the requested settings.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
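    # Example (added for clarity): for a pair (A, B), the token-type mask marks
    # [CLS] A [SEP] with 0s and B [SEP] with 1s, following the BERT segment-id scheme.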
| 475 |
'''simple docstring'''
def solution(limit: int = 1_000_000 ):
    """simple docstring"""
    # Sieve over the multiples of each prime to build Euler's totient for all n <= limit.
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
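# Worked check (added; not part of the original file): solution(10) == 31, since
# phi(2..10) = 1, 2, 2, 4, 2, 6, 4, 6, 4 and these values sum to 31.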
| 28 | 0 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_bert_from_pretrained( self ):
        '''simple docstring'''
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name ):
                config = AutoConfig.from_pretrained(model_name )
                self.assertIsNotNone(config )
                self.assertIsInstance(config , BertConfig )
                model = FlaxAutoModel.from_pretrained(model_name )
                self.assertIsNotNone(model )
                self.assertIsInstance(model , FlaxBertModel )
@slow
    def test_roberta_from_pretrained( self ):
        '''simple docstring'''
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name ):
                config = AutoConfig.from_pretrained(model_name )
                self.assertIsNotNone(config )
                # the concrete config class check is omitted here because RobertaConfig is not imported
                model = FlaxAutoModel.from_pretrained(model_name )
                self.assertIsNotNone(model )
                self.assertIsInstance(model , FlaxRobertaModel )
@slow
    def test_bert_jax_jit( self ):
        '''simple docstring'''
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            model = FlaxBertModel.from_pretrained(model_name )
            tokens = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
            @jax.jit
            def eval(**kwargs ):
                return model(**kwargs )
            eval(**tokens ).block_until_ready()
@slow
    def test_roberta_jax_jit( self ):
        '''simple docstring'''
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            model = FlaxRobertaModel.from_pretrained(model_name )
            tokens = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
            @jax.jit
            def eval(**kwargs ):
                return model(**kwargs )
            eval(**tokens ).block_until_ready()
    def test_repo_not_found( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , """bert-base is not a local folder and is not a valid model identifier""" ):
            model = FlaxAutoModel.from_pretrained("""bert-base""" )
    def test_revision_not_found( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            model = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="""aaaaaa""" )
    def test_model_file_missing( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , """hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack""" , ):
            model = FlaxAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
    def test_from_pt_suggestion( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(EnvironmentError , """Use `from_pt=True` to load this model""" ):
            model = FlaxAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
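    # Note (added): the four tests above pin down the error messages users see for a
    # bad repo id, a bad revision, missing flax weights, and a PyTorch-only checkpoint.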
| 392 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A )
    def get_input_output_texts( self, tokenizer ):
        '''simple docstring'''
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map )
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ), input_bpe_tokens )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A )
SCREAMING_SNAKE_CASE : int = tokenizer.encode(
'sequence builders', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(
'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while Python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Verify that the offsets correctly reflect `add_prefix_space` and `trim_offsets`.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
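    # Summary of the combinations above, for text "hello hello" (len("hello") == 5):
    #   trim_offsets=True  -> the second token's offsets exclude the leading space: (6, 11)
    #   trim_offsets=False -> the second token's offsets include the leading space: (5, 11)
    # add_prefix_space only shifts offsets when the text itself starts with a space.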
| 28 | 0 |
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            # key format reconstructed: function name plus its kwarg values
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
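# Note: `get_duration` (imported from utils above) is assumed to be a decorator that
# runs the wrapped function and returns its wall-clock runtime in seconds, which is
# what gets stored in the `times` dict and serialized to RESULTS_FILE_PATH.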
| 652 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time",
            addition_time_embed_dim=8, transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,  # 6 * addition_time_embed_dim + projection_dim
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):  # method name assumed; the dump only preserved a no-op override
        pass
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionXLImgaImgPipeline(**A )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(A )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
# forward without prompt embeds
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt
SCREAMING_SNAKE_CASE : Optional[int] = 3 * [inputs['prompt']]
SCREAMING_SNAKE_CASE : int = sd_pipe(**A )
SCREAMING_SNAKE_CASE : List[Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : str = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : int = 3 * [inputs.pop('prompt' )]
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) : Optional[Any] = sd_pipe.encode_prompt(A, negative_prompt=A )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(
**A, prompt_embeds=A, negative_prompt_embeds=A, pooled_prompt_embeds=A, negative_pooled_prompt_embeds=A, )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    # Class and test method names below are reconstructed; the obfuscated dump did
    # not preserve them.
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
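# Note: the tiny UNet/VAE/CLIP configurations in get_dummy_components exist purely so
# the pipeline runs in seconds on CPU; the expected_slice values are tied to these
# exact shapes and fixed seeds, so changing either invalidates the assertions.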
| 28 | 0 |
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter

# The coefficient formulas below follow the standard biquad parameterization of the
# "Audio EQ Cookbook" (R. Bristow-Johnson). Function and variable names were
# reconstructed from those formulas.


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass 2nd-order IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass 2nd-order IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass 2nd-order IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass 2nd-order IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    # For an all-pass, the feedback coefficients are the feedforward ones reversed.
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peaking-EQ 2nd-order IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf 2nd-order IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf 2nd-order IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
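# Minimal usage sketch (hedged: assumes IIRFilter exposes a per-sample `process()`
# method, as in the accompanying audio_filters.iir_filter module):
#
#   filt = make_lowpass(1_000, 48_000)            # 1 kHz cutoff at 48 kHz sample rate
#   filtered = [filt.process(s) for s in samples]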
| 443 |
"""Processor class for MGP-STR: pairs a ViT image processor with three tokenizers."""
import warnings

from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum

from ...processing_utils import ProcessorMixin


if is_torch_available():
    import torch


class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        # `sequences` is the triple of (char, bpe, wordpiece) logits produced by the model.
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        # For each sample, keep the decoding branch with the highest confidence.
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            # Truncate at the end-of-sequence marker and compute the cumulative
            # probability of the kept tokens as the confidence score.
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
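# Usage sketch (checkpoint and model names are assumptions for illustration):
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   outputs = model(pixel_values)   # e.g. an MgpstrForSceneTextRecognition model
#   text = processor.batch_decode(outputs.logits)["generated_text"]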
| 28 | 0 |
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")

DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
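# For reference, `find_backend` maps a guard line of the form
# "    if not is_xxx_available():" to the backend name "xxx"; e.g. (inferred from
# the assertions above): find_backend("    if not is_torch_available():") -> "torch"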
| 639 |
"""Convert an original SpeechT5 HiFi-GAN vocoder checkpoint to the transformers format."""
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    # Target attribute paths (conv_pre / upsampler / resblocks / conv_post) follow
    # the SpeechT5HifiGan module layout in transformers.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    # stats.npy stores the spectrogram normalization statistics: row 0 is the mean,
    # row 1 the scale.
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
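# Example invocation (all paths are placeholders):
#   python convert_hifigan.py \
#       --checkpoint_path generator.ckpt --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan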
| 28 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ : int = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[Any] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
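# Usage note: because of `_LazyModule`, the heavy torch/tf/flax submodules above are
# only imported on first attribute access, e.g.
#   from transformers.models.blenderbot_small import BlenderbotSmallConfig
# triggers the real import of `configuration_blenderbot_small` lazily.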
| 48 |
"""A singly linked list supporting head/tail/nth insertion and deletion."""
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node

        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
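# Complexity notes (derived from the implementation above): indexing, insert_nth and
# delete_nth walk the chain from the head, so they are O(n); insert_head/delete_head
# are O(1); reverse() is a single O(n) pointer-flipping pass with O(1) extra space.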
| 28 | 0 |
from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
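# Example invocations (assuming the package's `transformers-cli` console-script
# entry point is installed):
#   $ transformers-cli env
#   $ transformers-cli download bert-base-uncased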
| 604 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after resizing, mirroring the image processor's logic."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):  # method name assumed; the dump only preserved a no-op override
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 28 | 0 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(_a , '''neck_hidden_sizes''' ) )
self.parent.assertTrue(hasattr(_a , '''num_attention_heads''' ) )
class __magic_name__ :
'''simple docstring'''
def __init__( self:Dict , _a:List[str] , _a:Tuple=13 , _a:List[str]=32 , _a:Any=2 , _a:Any=3 , _a:int=6_40 , _a:Dict=4 , _a:Any="silu" , _a:List[Any]=3 , _a:Tuple=32 , _a:Any=0.1 , _a:Any=0.1 , _a:Union[str, Any]=0.1 , _a:Union[str, Any]=0.02 , _a:Optional[int]=True , _a:Optional[int]=True , _a:List[Any]=10 , _a:List[str]=None , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = image_size
snake_case__ = patch_size
snake_case__ = num_channels
snake_case__ = last_hidden_size
snake_case__ = num_attention_heads
snake_case__ = hidden_act
snake_case__ = conv_kernel_size
snake_case__ = output_stride
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = classifier_dropout_prob
snake_case__ = use_labels
snake_case__ = is_training
snake_case__ = num_labels
snake_case__ = initializer_range
snake_case__ = scope
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ = None
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case__ = self.get_config()
return config, pixel_values, labels, pixel_labels
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:List[Any] , _a:Dict , _a:Optional[Any] , _a:Union[str, Any] ):
snake_case__ = MobileViTModel(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
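# The backbone reduces both spatial dimensions by `output_stride` relative to the input image.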
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:List[Any] , _a:Optional[Any] , _a:int , _a:int ):
snake_case__ = self.num_labels
snake_case__ = MobileViTForImageClassification(_a )
model.to(_a )
model.eval()
snake_case__ = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self:int , _a:int , _a:str , _a:List[str] , _a:Optional[int] ):
snake_case__ = self.num_labels
snake_case__ = MobileViTForSemanticSegmentation(_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
snake_case__ = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ , snake_case__ = config_and_inputs
snake_case__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Tuple = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__lowercase : List[Any] = (
{
'''feature-extraction''': MobileViTModel,
'''image-classification''': MobileViTForImageClassification,
'''image-segmentation''': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowercase : Optional[int] = False
__lowercase : Dict = False
__lowercase : List[Any] = False
__lowercase : Optional[int] = False
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = MobileViTModelTester(self )
snake_case__ = MobileViTConfigTester(self , config_class=_a , has_text_modality=_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViT does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
pass
@unittest.skip(reason='''MobileViT does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE__ ( self:int ):
pass
@unittest.skip(reason='''MobileViT does not output attentions''' )
def SCREAMING_SNAKE_CASE__ ( self:int ):
pass
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(_a )
snake_case__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ = [*signature.parameters.keys()]
snake_case__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _a )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE__ ( self:int ):
pass
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
def check_hidden_states_output(_a:List[str] , _a:List[str] , _a:Optional[int] ):
snake_case__ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
snake_case__ = model(**self._prepare_for_class(_a , _a ) )
snake_case__ = outputs.hidden_states
snake_case__ = 5
self.assertEqual(len(_a ) , _a )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
snake_case__ = 2
for i in range(len(_a ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
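# After the loop, `divisor` is one halving beyond the last feature map, so the model's output stride must equal divisor // 2.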
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ = True
check_hidden_states_output(_a , _a , _a )
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ = MobileViTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def SCREAMING_SNAKE_CASE ( ) -> Any:
snake_case__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Any ):
return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(_a )
snake_case__ = self.default_image_processor
snake_case__ = prepare_img()
snake_case__ = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
snake_case__ = model(**_a )
# verify the logits
snake_case__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _a )
snake_case__ = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
snake_case__ = model.to(_a )
snake_case__ = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
snake_case__ = prepare_img()
snake_case__ = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
snake_case__ = model(**_a )
snake_case__ = outputs.logits
# verify the logits
snake_case__ = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _a )
snake_case__ = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=_a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _a , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
snake_case__ = model.to(_a )
snake_case__ = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
snake_case__ = prepare_img()
snake_case__ = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
snake_case__ = model(**_a )
snake_case__ = outputs.logits.detach().cpu()
snake_case__ = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(50, 60)] )
snake_case__ = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _a )
snake_case__ = image_processor.post_process_semantic_segmentation(outputs=_a )
snake_case__ = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _a )
| 33 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = TypeVar("DatasetType", Dataset, IterableDataset)
def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[List[float]] = None ,__UpperCamelCase: Optional[int] = None ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" ,):
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
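# Validate every element and infer the common dataset type (map-style vs. iterable) from the first one.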
for i, dataset in enumerate(__UpperCamelCase ):
if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ):
if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'is an empty dataset dictionary.' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." )
if i == 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = (
(Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(__UpperCamelCase ,__UpperCamelCase ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase )
else:
return _interleave_iterable_datasets(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase )
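# A minimal usage sketch (d1/d2 are hypothetical datasets; the public entry point in `datasets` is `interleave_datasets`):
#     mixed = interleave_datasets([d1, d2], probabilities=[0.7, 0.3], stopping_strategy="all_exhausted")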
def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: int = 0 ,):
"""simple docstring"""
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
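# Same validation as for interleaving: every element must be a Dataset or an IterableDataset of one common type.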
for i, dataset in enumerate(__UpperCamelCase ):
if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ):
if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'is an empty dataset dictionary.' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." )
if i == 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = (
(Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(__UpperCamelCase ,__UpperCamelCase ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
else:
return _concatenate_iterable_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
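# A minimal usage sketch (d1/d2 are hypothetical datasets; the public entry point is `concatenate_datasets`):
#     combined = concatenate_datasets([d1, d2])           # stack rows (axis=0)
#     widened = concatenate_datasets([d1, d2], axis=1)    # join columns side by side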
| 28 | 0 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowercase_ ():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
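# Sanity check: before any patching, both the direct and the renamed aliases resolve to the real os objects.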
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
snake_case__ : Tuple = '__test_patch_submodule_mock__'
with patch_submodule(_test_patching , 'os.path.join' , __UpperCamelCase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowercase_ ():
assert _test_patching.open is open
snake_case__ : Tuple = '__test_patch_submodule_builtin_mock__'
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , 'open' , __UpperCamelCase ):
assert _test_patching.open is mock
# check that everything is back to normal when the patch is over
assert _test_patching.open is open
def lowercase_ ():
snake_case__ : Union[str, Any] = '__test_patch_submodule_missing_mock__'
with patch_submodule(_test_patching , 'pandas.read_csv' , __UpperCamelCase ):
pass
def lowercase_ ():
snake_case__ : Optional[int] = '__test_patch_submodule_missing_builtin_mock__'
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , 'len' , __UpperCamelCase ) is None
with patch_submodule(_test_patching , 'len' , __UpperCamelCase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def lowercase_ ():
snake_case__ : Optional[Any] = '__test_patch_submodule_start_and_stop_mock__'
snake_case__ : Optional[Any] = patch_submodule(_test_patching , 'open' , __UpperCamelCase )
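# The patch object is inert until start() is called; stop() reverts the attribute, mirroring the context manager.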
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowercase_ ():
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
snake_case__ : int = '__test_patch_submodule_successive_join__'
snake_case__ : int = '__test_patch_submodule_successive_dirname__'
snake_case__ : List[str] = '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
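# Nested patches may be applied in any order; inside all three contexts every target resolves to its mock.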
with patch_submodule(_test_patching , 'os.path.join' , __UpperCamelCase ):
with patch_submodule(_test_patching , 'os.rename' , __UpperCamelCase ):
with patch_submodule(_test_patching , 'os.path.dirname' , __UpperCamelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , 'os.rename' , __UpperCamelCase ):
with patch_submodule(_test_patching , 'os.path.join' , __UpperCamelCase ):
with patch_submodule(_test_patching , 'os.path.dirname' , __UpperCamelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowercase_ ():
snake_case__ : List[str] = '__test_patch_submodule_doesnt_exist_mock__'
with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , __UpperCamelCase ):
pass
with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , __UpperCamelCase ):
pass
| 478 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A, 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(A, 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(A, 'num_attention_heads' ) )
class _a :
'''simple docstring'''
def __init__( self, A, A=13, A=32, A=2, A=3, A=640, A=4, A="silu", A=3, A=32, A=0.1, A=0.1, A=0.1, A=0.02, A=True, A=True, A=10, A=None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : str = patch_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : int = last_hidden_size
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = conv_kernel_size
SCREAMING_SNAKE_CASE : Optional[Any] = output_stride
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : int = is_training
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = scope
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = MobileViTModel(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.num_labels
SCREAMING_SNAKE_CASE : Tuple = MobileViTForImageClassification(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(A, labels=A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE : str = MobileViTForSemanticSegmentation(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : str = model(A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
SCREAMING_SNAKE_CASE : int = model(A, labels=A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
A : List[Any] = (
{
'''feature-extraction''': MobileViTModel,
'''image-classification''': MobileViTForImageClassification,
'''image-segmentation''': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A : Optional[int] = False
A : Dict = False
A : List[Any] = False
A : Optional[int] = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = MobileViTModelTester(self )
SCREAMING_SNAKE_CASE : str = MobileViTConfigTester(self, config_class=A, has_text_modality=A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A )
SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ['pixel_values']
self.assertListEqual(arg_names[:1], A )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
def check_hidden_states_output(A, A, A ):
SCREAMING_SNAKE_CASE : Any = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : List[str] = 5
self.assertEqual(len(A ), A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE : int = 2
for i in range(len(A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(A, A, A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
check_hidden_states_output(A, A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : int = MobileViTModel.from_pretrained(A )
self.assertIsNotNone(A )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(A )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : Dict = prepare_img()
SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**A )
# verify the logits
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, A )
SCREAMING_SNAKE_CASE : int = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = model.to(A )
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**A )
SCREAMING_SNAKE_CASE : List[str] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, A )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[
[[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
[[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
[[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
], device=A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : List[str] = model.to(A )
SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**A )
SCREAMING_SNAKE_CASE : int = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE : Dict = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, A )
SCREAMING_SNAKE_CASE : Tuple = image_processor.post_process_semantic_segmentation(outputs=A )
SCREAMING_SNAKE_CASE : Any = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, A )
| 28 | 0 |
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
_SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""",
},
"""merges_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""Salesforce/codegen-350M-mono""": (
"""https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"""
),
},
}
_SCREAMING_SNAKE_CASE = {
"""Salesforce/codegen-350M-mono""": 2_0_4_8,
}
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ['''input_ids''', '''attention_mask''']
__lowerCAmelCase = CodeGenTokenizer
def __init__( self : List[str] , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : int="<|endoftext|>" , lowerCamelCase_ : Optional[Any]="<|endoftext|>" , lowerCamelCase_ : Tuple="<|endoftext|>" , lowerCamelCase_ : Optional[int]=False , **lowerCamelCase_ : Dict , ):
"""simple docstring"""
super().__init__(
lowerCamelCase_ , lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , )
if kwargs.pop("""add_bos_token""" , lowerCamelCase_ ):
UpperCamelCase = kwargs.pop("""name_or_path""" , """""" )
raise ValueError(
"""Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token."""
"""Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n"""
f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"""This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."""
""" so that the fast tokenizer works correctly.""" )
UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowerCamelCase_ ) != add_prefix_space:
UpperCamelCase = getattr(lowerCamelCase_ , pre_tok_state.pop("""type""" ) )
UpperCamelCase = add_prefix_space
UpperCamelCase = pre_tok_class(**lowerCamelCase_ )
UpperCamelCase = add_prefix_space
def lowerCamelCase_ ( self : Any , *lowerCamelCase_ : str , **lowerCamelCase_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = kwargs.get("""is_split_into_words""" , lowerCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : int ):
"""simple docstring"""
UpperCamelCase = kwargs.get("""is_split_into_words""" , lowerCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple = None ):
"""simple docstring"""
UpperCamelCase = self._tokenizer.model.save(lowerCamelCase_ , name=lowerCamelCase_ )
return tuple(lowerCamelCase_ )
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any] = False , lowerCamelCase_ : Optional[Any] = None , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Union[str, Any] , ):
"""simple docstring"""
UpperCamelCase = super().decode(
token_ids=lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ , **lowerCamelCase_ , )
if truncate_before_pattern is not None and len(lowerCamelCase_ ) > 0:
UpperCamelCase = self.truncate(lowerCamelCase_ , lowerCamelCase_ )
return decoded_text
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict ):
"""simple docstring"""
def find_re(lowerCamelCase_ : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ):
UpperCamelCase = pattern.search(lowerCamelCase_ , lowerCamelCase_ )
return m.start() if m else -1
UpperCamelCase = [re.compile(lowerCamelCase_ , re.MULTILINE ) for pattern in truncate_before_pattern]
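# Patterns are searched in MULTILINE mode so `^` anchors match line starts; the completion is cut before the second top-level `print`/`def` occurrence.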
UpperCamelCase = list(re.finditer("""^print""" , lowerCamelCase_ , re.MULTILINE ) )
if len(lowerCamelCase_ ) > 1:
UpperCamelCase = completion[: prints[1].start()]
UpperCamelCase = list(re.finditer("""^def""" , lowerCamelCase_ , re.MULTILINE ) )
if len(lowerCamelCase_ ) > 1:
UpperCamelCase = completion[: defs[1].start()]
UpperCamelCase = 0
UpperCamelCase = [
pos for pos in [find_re(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) for terminal in terminals] if pos != -1
]
if len(lowerCamelCase_ ) > 0:
return completion[: min(lowerCamelCase_ )]
else:
return completion
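# A hedged usage sketch for truncation (the regexes are illustrative, not the tokenizer's defaults):
#     tokenizer.decode(token_ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])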
| 537 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase_ = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase_ = {
"distilbert-base-uncased": 5_1_2,
"distilbert-base-uncased-distilled-squad": 5_1_2,
"distilbert-base-cased": 5_1_2,
"distilbert-base-cased-distilled-squad": 5_1_2,
"distilbert-base-german-cased": 5_1_2,
"distilbert-base-multilingual-cased": 5_1_2,
}
UpperCamelCase_ = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
A : Optional[int] = ['''input_ids''', '''attention_mask''']
A : List[Any] = DistilBertTokenizer
def __init__( self, A=None, A=None, A=True, A="[UNK]", A="[SEP]", A="[PAD]", A="[CLS]", A="[MASK]", A=True, A=None, **A, ):
'''simple docstring'''
super().__init__(
A, tokenizer_file=A, do_lower_case=A, unk_token=A, sep_token=A, pad_token=A, cls_token=A, mask_token=A, tokenize_chinese_chars=A, strip_accents=A, **A, )
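# Keep the fast tokenizer's normalizer state in sync with the requested lowercasing/accent-stripping options.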
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase', A ) != do_lower_case
or normalizer_state.get('strip_accents', A ) != strip_accents
or normalizer_state.get('handle_chinese_chars', A ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(A, normalizer_state.pop('type' ) )
SCREAMING_SNAKE_CASE : Optional[Any] = do_lower_case
SCREAMING_SNAKE_CASE : List[str] = strip_accents
SCREAMING_SNAKE_CASE : List[str] = tokenize_chinese_chars
SCREAMING_SNAKE_CASE : Dict = normalizer_class(**A )
SCREAMING_SNAKE_CASE : Union[str, Any] = do_lower_case
def UpperCamelCase_ ( self, A, A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self, A, A = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : str = [self.cls_token_id]
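# Token type ids: zeros for the first sequence (including [CLS]/[SEP]), ones for the optional second sequence.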
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self, A, A = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(A, name=A )
return tuple(A )
| 28 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
def __init__( self , UpperCAmelCase , UpperCAmelCase=7 , UpperCAmelCase=3 , UpperCAmelCase=30 , UpperCAmelCase=400 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=[0.5, 0.5, 0.5] , UpperCAmelCase=[0.5, 0.5, 0.5] , UpperCAmelCase=True , UpperCAmelCase=1 / 255 , UpperCAmelCase=True , ):
lowerCamelCase_ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = min_resolution
lowerCamelCase_ = max_resolution
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean
lowerCamelCase_ = image_std
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_pad
def UpperCAmelCase__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=False ):
if not batched:
lowerCamelCase_ = image_inputs[0]
if isinstance(UpperCAmelCase , Image.Image ):
lowerCamelCase_ = image.size
else:
lowerCamelCase_ = image.shape[1], image.shape[2]
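# Mirror the processor's shortest-edge resize: scale the shorter side to `shortest_edge` while preserving the aspect ratio.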
if w < h:
lowerCamelCase_ = int(self.size['''shortest_edge'''] * h / w )
lowerCamelCase_ = self.size['shortest_edge']
elif w > h:
lowerCamelCase_ = self.size['shortest_edge']
lowerCamelCase_ = int(self.size['''shortest_edge'''] * w / h )
else:
lowerCamelCase_ = self.size['shortest_edge']
lowerCamelCase_ = self.size['shortest_edge']
else:
lowerCamelCase_ = []
for image in image_inputs:
lowerCamelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase_ = max(UpperCAmelCase , key=lambda UpperCAmelCase : item[0] )[0]
lowerCamelCase_ = max(UpperCAmelCase , key=lambda UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
a__: List[Any] = YolosImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = YolosImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''size''' ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase )
lowerCamelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
lowerCamelCase_ = image_processing(UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(UpperCAmelCase , return_tensors='''pt''' ).pixel_values
lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(UpperCAmelCase , return_tensors='''pt''' ).pixel_values
lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
lowerCamelCase_ = self.image_processing_class(do_resize=UpperCAmelCase , do_normalize=UpperCAmelCase , do_rescale=UpperCAmelCase )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
lowerCamelCase_ = image_processing_a.pad(UpperCAmelCase , return_tensors='''pt''' )
lowerCamelCase_ = image_processing_a(UpperCAmelCase , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {'image_id': 3_9769, 'annotations': target}
# encode them
lowerCamelCase_ = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
lowerCamelCase_ = image_processing(images=UpperCAmelCase , annotations=UpperCAmelCase , return_tensors='''pt''' )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase )
lowerCamelCase_ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase , atol=1e-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase )
lowerCamelCase_ = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase , atol=1e-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase ) )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase ) )
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
lowerCamelCase_ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowerCamelCase_ = YolosImageProcessor(format='''coco_panoptic''' )
lowerCamelCase_ = image_processing(images=UpperCAmelCase , annotations=UpperCAmelCase , masks_path=UpperCAmelCase , return_tensors='''pt''' )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase )
lowerCamelCase_ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase , atol=1e-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase )
lowerCamelCase_ = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase , atol=1e-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase ) )
# verify masks
lowerCamelCase_ = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase ) )
| 29 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
'''simple docstring'''
    def test_cached_files_are_used_when_internet_is_down( self ):
        '''simple docstring'''
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock ) as mock_head:
            _ = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
        # This check we did call the fake head request
        mock_head.assert_called()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ViTImageProcessor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaises(A ):
# config is in subfolder, the following should not work without specifying the subfolder
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' )
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(
'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor' )
self.assertIsNotNone(A )
@is_staging_test
class _a ( unittest.TestCase ):
'''simple docstring'''
@classmethod
    def setUpClass( cls ):
'''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-image-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-image-processor' )
except HTTPError:
pass
    def test_push_to_hub( self ):
        '''simple docstring'''
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('test-image-processor', use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id='test-image-processor', push_to_hub=True, use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k ) )
    def test_push_to_hub_in_organization( self ):
        '''simple docstring'''
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id='valid_org/test-image-processor-org', push_to_hub=True, use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k ) )
    def test_push_to_hub_dynamic_image_processor( self ):
        '''simple docstring'''
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'}, )
        new_image_processor = AutoImageProcessor.from_pretrained(
            F"{USER}/test-dynamic-image-processor", trust_remote_code=True )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor' )
| 28 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A = logging.get_logger(__name__)
A = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig( PretrainedConfig ):
    model_type = '''deformable_detr'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=3_00 , max_position_embeddings=10_24 , encoder_layers=6 , encoder_ffn_dim=10_24 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=10_24 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=2_56 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.0_2 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , num_feature_levels=4 , encoder_n_points=4 , decoder_n_points=4 , two_stage=False , two_stage_num_proposals=3_00 , with_box_refine=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.2_5 , disable_custom_kernels=False , **kwargs , ):
"""simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can\'t specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=["stage4"] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ):
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size( self ):
        """simple docstring"""
        return self.d_model
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 475 |
'''simple docstring'''
class Node :
    '''simple docstring'''
    def __init__( self, val ):
        '''simple docstring'''
        self.val = val
        self.left = None
        self.right = None
    def insert( self, val ):
        '''simple docstring'''
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val )
                else:
                    self.right.insert(val )
        else:
            self.val = val
def inorder( root ,res ):
    """simple docstring"""
    if root:
        inorder(root.left ,res )
        res.append(root.val )
        inorder(root.right ,res )
def tree_sort( arr ):
    """simple docstring"""
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1 ,len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    res = []
    inorder(root ,res )
    return res
if __name__ == "__main__":
print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
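# Added check (not part of the original sample): the tree sort above returns
# the input in ascending order; a value equal to an existing node overwrites
# it via the final `else` branch, so duplicates collapse.
assert tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]) == [1, 2, 3, 9, 10, 13, 14]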
| 28 | 0 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job( job ):
    job_info = {}
    start = job['started_at']
    end = job['completed_at']
    start_datetime = date_parser.parse(start )
    end_datetime = date_parser.parse(end )
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    job_info['started_at'] = start
    job_info['completed_at'] = end
    job_info['duration'] = duration_in_min
    return job_info
def get_job_time( workflow_run_id , token=None ):
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''}
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    result = requests.get(url , headers=headers ).json()
    job_time = {}
    try:
        job_time.update({job["""name"""]: extract_time_from_single_job(job ) for job in result["""jobs"""]} )
        pages_to_iterate_over = math.ceil((result["""total_count"""] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'''&page={i + 2}''' , headers=headers ).json()
            job_time.update({job["""name"""]: extract_time_from_single_job(job ) for job in result["""jobs"""]} )
        return job_time
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
        return {}
if __name__ == "__main__":
snake_case__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
snake_case__ : Union[str, Any] = parser.parse_args()
snake_case__ : Dict = get_job_time(args.workflow_run_id)
snake_case__ : Any = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F"""{k}: {v["duration"]}""")
| 392 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate( *args ,take_from: Optional[Union[Dict, Any]] = None ,standard_warn: bool = True ,stacklevel: int = 2 ):
    """simple docstring"""
    from .. import __version__
    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] ,tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}" )
        warning = None
        if isinstance(deprecated_kwargs ,dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs ,attribute ):
            values += (getattr(deprecated_kwargs ,attribute ),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message ,FutureWarning ,stacklevel=stacklevel )
    if isinstance(deprecated_kwargs ,dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
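# Added usage sketch (not part of the original file; `num_epochs`/`epochs` are
# hypothetical names): a caller typically pops a deprecated keyword argument
# while emitting a FutureWarning, e.g.
#     num_epochs = deprecate("num_epochs", "999.0.0", "Use `epochs` instead.", take_from=kwargs)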
| 28 | 0 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset( IterableDataset ):
    def __init__( self ,tokenizer ,dataset ,seq_length=1_0_2_4 ,num_of_sequences=1_0_2_4 ,chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__( self ):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)['content'])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer ,truncation=False)['input_ids']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0 ,len(all_token_ids) ,self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader( args ):
    ds_kwargs = {'streaming': True}
    valid_data = load_dataset(args.dataset_name , split='train' , **ds_kwargs )
    valid_dataset = ConstantLengthDataset(tokenizer , valid_data , seq_length=args.seq_length )
    eval_dataloader = DataLoader(valid_dataset , batch_size=args.batch_size )
    return eval_dataloader
def evaluate( args ):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        loss = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(loss ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(loss )
    except OverflowError:
        perplexity = float('inf' )
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("""Evaluating and saving model after training""")
eval_loss, perplexity = evaluate(args)
logger.info(F"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 652 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 | 0 |
import numpy as np
def sigmoid( vector: np.array ) -> np.array:
    return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
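# Added illustration (not part of the original sample): the helper above is a
# plain logistic sigmoid, so 0.0 maps to 0.5 and outputs stay within (0, 1).
def _sigmoid_demo() -> None:
    out = sigmoid(np.array([-1.0, 0.0, 1.0]))
    assert np.allclose(out, [0.26894142, 0.5, 0.73105858])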
| 443 |
'''simple docstring'''
def get_highest_set_bit_position( number: int ) -> int:
    """simple docstring"""
    if not isinstance(number ,int ):
        raise TypeError('Input value must be an \'int\' type' )
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
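# Added check (not part of the original sample): 10 == 0b1010, so the highest
# set bit sits at 1-indexed position 4; for 1 == 0b1 the position is 1.
assert get_highest_set_bit_position(10) == 4
assert get_highest_set_bit_position(1) == 1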
| 28 | 0 |
def jaro_winkler( stra: str , strb: str ) -> float:
    '''simple docstring'''
    def get_matched_characters(_stra: str , _strb: str) -> str:
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                _strb = F'{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}'
        return "".join(matched )
    # matching characters
    matching_a = get_matched_characters(stra , strb )
    matching_b = get_matched_characters(strb , stra )
    match_count = len(matching_a )
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra )
                + match_count / len(strb )
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4] , strb[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
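# Added checks (not part of the original sample): identical strings score 1.0
# and fully disjoint strings score 0.0 under the implementation above.
assert jaro_winkler("hello", "hello") == 1.0
assert jaro_winkler("abc", "xyz") == 0.0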
| 639 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline( Pipeline ):
    '''simple docstring'''
    def _sanitize_parameters( self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs ):
        '''simple docstring'''
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
            tokenize_kwargs['truncation'] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params['return_tensors'] = return_tensors
        return preprocess_params, {}, postprocess_params
    def preprocess( self, inputs, **tokenize_kwargs ):
        '''simple docstring'''
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs )
        return model_inputs
    def _forward( self, model_inputs ):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self, model_outputs, return_tensors=False ):
        '''simple docstring'''
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()
    def __call__( self, *args, **kwargs ):
        '''simple docstring'''
        return super().__call__(*args, **kwargs )
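# Added usage sketch (not part of the original file; the model name is only an
# example): the class above follows the standard `transformers` pipeline
# contract, so it is normally reached through the `pipeline` factory, e.g.
#     extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#     embedding = extractor("This is a test.", return_tensors=False)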
| 28 | 0 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 5_12,
"facebook/dpr-ctx_encoder-multiset-base": 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 5_12,
"facebook/dpr-question_encoder-multiset-base": 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 5_12,
"facebook/dpr-reader-multiset-base": 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast( BertTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast( BertTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
UpperCAmelCase__ : Dict = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
    def __call__( self , questions , titles: Optional[str] = None , texts: Optional[str] = None , padding: Union[bool, str] = False , truncation: Union[bool, str] = False , max_length: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , return_attention_mask: Optional[bool] = None , **kwargs , ) -> BatchEncoding:
        """simple docstring"""
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        assert len(titles ) == len(
            texts ), f"""There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts."""
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )['input_ids']
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self , reader_input: BatchEncoding , reader_output: DPRReaderOutput , num_spans: int = 16 , max_answer_length: int = 64 , num_spans_per_passage: int = 4 , ):
        """simple docstring"""
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
                if len(nbest_spans_predictions ) >= num_spans:
                    break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits , end_logits , max_answer_length , top_spans , ):
        """simple docstring"""
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizerFast( CustomDPRReaderTokenizerMixin , BertTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = DPRReaderTokenizer
| 48 |
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode :
    '''simple docstring'''
    def __init__( self, data ):
        '''simple docstring'''
        self.data = data
        self.left = None
        self.right = None
def build_tree( ):
    """simple docstring"""
    print('\n********Press N to stop entering at any point of time********\n' )
    check = input('Enter the value of the root node: ' ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise  # unreachable: the loop above always returns
def pre_order( node: TreeNode ):
    """simple docstring"""
    if not isinstance(node ,TreeNode ) or not node:
        return
    print(node.data ,end=',' )
    pre_order(node.left )
    pre_order(node.right )
def in_order( node: TreeNode ):
    """simple docstring"""
    if not isinstance(node ,TreeNode ) or not node:
        return
    in_order(node.left )
    print(node.data ,end=',' )
    in_order(node.right )
def post_order( node: TreeNode ):
    """simple docstring"""
    if not isinstance(node ,TreeNode ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data ,end=',' )
def level_order( node: TreeNode ):
    """simple docstring"""
    if not isinstance(node ,TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data ,end=',' )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def level_order_actual( node: TreeNode ):
    """simple docstring"""
    if not isinstance(node ,TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data ,end=',' )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def pre_order_iter( node: TreeNode ):
    """simple docstring"""
    if not isinstance(node ,TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n: # start from root node, find its left child
            print(n.data ,end=',' )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter( node: TreeNode ):
    """simple docstring"""
    if not isinstance(node ,TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data ,end=',' )
        n = n.right
def post_order_iter( node: TreeNode ):
    """simple docstring"""
    if not isinstance(node ,TreeNode ) or not node:
        return
    stacka, stackb = [], []
    n = node
    stacka.append(n )
    while stacka: # to find the reversed order of post order, store it in stackb
        n = stacka.pop()
        if n.left:
            stacka.append(n.left )
        if n.right:
            stacka.append(n.right )
        stackb.append(n )
    while stackb: # pop up from stackb will be the post order
        print(stackb.pop().data ,end=',' )
def prompt( s: str = "" ,width: int = 50 ,char: str = "*" ) -> str:
    """simple docstring"""
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s ) - 2 ,2 )
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
    node = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 28 | 0 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
A_ = datasets.utils.logging.get_logger(__name__)
A_ = ["names", "prefix"]
A_ = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
A_ = ["encoding_errors", "on_bad_lines"]
A_ = ["date_format"]
@dataclass
class CsvConfig( datasets.BuilderConfig ):
lowercase = ","
lowercase = None
lowercase = "infer"
lowercase = None
lowercase = None
lowercase = None
lowercase = None
lowercase = None
lowercase = True
lowercase = None
lowercase = None
lowercase = None
lowercase = None
lowercase = False
lowercase = None
lowercase = None
lowercase = None
lowercase = True
lowercase = True
lowercase = False
lowercase = True
lowercase = None
lowercase = "."
lowercase = None
lowercase = '"'
lowercase = 0
lowercase = None
lowercase = None
lowercase = None
lowercase = None
lowercase = True
lowercase = True
lowercase = 0
lowercase = True
lowercase = False
lowercase = None
lowercase = 10000
lowercase = None
lowercase = "strict"
lowercase = "error"
lowercase = None
    def __post_init__( self ):
        '''simple docstring'''
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs( self ):
        '''simple docstring'''
        pd_read_csv_kwargs = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , __lowerCamelCase ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info( self ):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'''files''': files} ) )
        return splits
    def _cast_table( self , pa_table ):
        '''simple docstring'''
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema )
        return pa_table
    def _generate_tables( self , files ):
        '''simple docstring'''
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(f'Failed to read file \'{file}\' with error {type(e )}: {e}' )
                raise
| 604 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow :
    '''simple docstring'''
    def __init__( self, device = "cpu", clip_model = "openai/clip-vit-large-patch14" ):
        '''simple docstring'''
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model )
        self.image_mean = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
        self.image_std = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std )
        self.resize = torchvision.transforms.Resize(224 )
        self.center_crop = torchvision.transforms.CenterCrop(224 )
    def preprocess_img( self, images ):
        '''simple docstring'''
        images = self.resize(images )
        images = self.center_crop(images )
        images = self.normalize(images )
        return images
    def __call__( self, text=None, images=None, **kwargs ):
        '''simple docstring'''
        encoding = self.tokenizer(text=text, **kwargs )
        encoding['pixel_values'] = self.preprocess_img(images )
        encoding = {key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
class _a ( nn.Module ):
'''simple docstring'''
def __init__( self, A=10, A=0.01, A=None, A=None, A=None, A=None, A=None, A=None, A=False, A=True, A="image", A=True, A=False, A=False, A=False, ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : List[Any] = device if device else get_device()
if vqgan:
SCREAMING_SNAKE_CASE : Optional[Any] = vqgan
else:
SCREAMING_SNAKE_CASE : Tuple = load_vqgan(self.device, conf_path=A, ckpt_path=A )
self.vqgan.eval()
if clip:
SCREAMING_SNAKE_CASE : List[str] = clip
else:
SCREAMING_SNAKE_CASE : Any = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
SCREAMING_SNAKE_CASE : Optional[int] = ProcessorGradientFlow(device=self.device )
SCREAMING_SNAKE_CASE : Optional[int] = iterations
SCREAMING_SNAKE_CASE : Tuple = lr
SCREAMING_SNAKE_CASE : Tuple = log
SCREAMING_SNAKE_CASE : str = make_grid
SCREAMING_SNAKE_CASE : Dict = return_val
SCREAMING_SNAKE_CASE : Union[str, Any] = quantize
SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self, A=None, A=None, A=5, A=True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = []
if output_path is None:
SCREAMING_SNAKE_CASE : int = './animation.gif'
if input_path is None:
SCREAMING_SNAKE_CASE : Optional[int] = self.save_path
SCREAMING_SNAKE_CASE : Optional[Any] = sorted(glob(input_path + '/*' ) )
if not len(A ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(A ) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
SCREAMING_SNAKE_CASE : Optional[Any] = total_duration / len(A )
SCREAMING_SNAKE_CASE : int = [frame_duration] * len(A )
if extend_frames:
SCREAMING_SNAKE_CASE : List[str] = 1.5
SCREAMING_SNAKE_CASE : int = 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(A ) )
imageio.mimsave(A, A, duration=A )
print(F"gif saved to {output_path}" )
def UpperCamelCase_ ( self, A=None, A=None ):
'''simple docstring'''
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
SCREAMING_SNAKE_CASE : str = preprocess(Image.open(A ), target_image_size=256 ).to(self.device )
SCREAMING_SNAKE_CASE : Any = preprocess_vqgan(A )
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : Tuple = self.vqgan.encode(A )
return z
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.latent.detach().requires_grad_()
SCREAMING_SNAKE_CASE : Union[str, Any] = base_latent + transform_vector
if self.quantize:
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.quantize(A )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = trans_latent
return self.vqgan.decode(A )
def UpperCamelCase_ ( self, A, A, A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.clip_preprocessor(text=A, images=A, return_tensors='pt', padding=A )
SCREAMING_SNAKE_CASE : str = self.clip(**A )
SCREAMING_SNAKE_CASE : Any = clip_outputs.logits_per_image
if weights is not None:
SCREAMING_SNAKE_CASE : List[Any] = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_clip_similarity(pos_prompts['prompts'], A, weights=(1 / pos_prompts['weights']) )
if neg_prompts:
SCREAMING_SNAKE_CASE : List[Any] = self._get_clip_similarity(neg_prompts['prompts'], A, weights=neg_prompts['weights'] )
else:
SCREAMING_SNAKE_CASE : str = torch.tensor([1], device=self.device )
SCREAMING_SNAKE_CASE : List[Any] = -torch.log(A ) + torch.log(A )
return loss
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = torch.randn_like(self.latent, requires_grad=A, device=self.device )
SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
SCREAMING_SNAKE_CASE : Union[str, Any] = self._add_vector(A )
SCREAMING_SNAKE_CASE : Dict = loop_post_process(A )
SCREAMING_SNAKE_CASE : List[str] = self._get_CLIP_loss(A, A, A )
print('CLIP loss', A )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=A )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
wandb.init(reinit=A, project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
SCREAMING_SNAKE_CASE : Tuple = Image.open(A )
SCREAMING_SNAKE_CASE : int = image.resize((256, 256) )
wandb.log('Original Image', wandb.Image(A ) )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not prompts:
return []
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Dict = []
if isinstance(A, A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(A, (tuple, list) ):
SCREAMING_SNAKE_CASE : List[str] = prompt[0]
SCREAMING_SNAKE_CASE : Any = float(prompt[1] )
elif ":" in prompt:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = prompt.split(':' )
SCREAMING_SNAKE_CASE : Any = float(A )
else:
SCREAMING_SNAKE_CASE : Dict = prompt
SCREAMING_SNAKE_CASE : List[Any] = 1.0
processed_prompts.append(A )
weights.append(A )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A, device=self.device ),
}
def UpperCamelCase_ ( self, A, A=None, A=None, A=True, A=False, A=True, A=True, A=None, ):
'''simple docstring'''
if image_path:
SCREAMING_SNAKE_CASE : int = self._get_latent(A )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(A, A, A )
assert pos_prompts, "You must provide at least one positive prompt."
SCREAMING_SNAKE_CASE : Dict = self.process_prompts(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.process_prompts(A )
if save_final and save_path is None:
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(A ):
os.makedirs(A )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = save_path + '_' + get_timestamp()
os.makedirs(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = save_path
SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(A ) )
SCREAMING_SNAKE_CASE : int = loop_post_process(A )
for iter, transformed_img in enumerate(self._optimize_CLIP(A, A, A ) ):
if show_intermediate:
show_pil(A )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({'Image': wandb.Image(A )} )
if show_final:
show_pil(A )
if save_final:
transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png" ) )
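

# Hypothetical usage sketch (method and argument names are assumptions, since
# this research script exposes many knobs): CLIP similarity to the positive
# prompts is maximized, and to the negative prompts minimized, by optimizing a
# transform vector added to the VQGAN latent.
#
#   editor = VQGAN_CLIP(iterations=10, lr=0.01, log=False)
#   editor.generate("a smiling face:1.0", neg_prompts="glasses:0.5", image_path="face.png")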
| 28 | 0 |
def solution(limit: int = 1000000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F"""{solution() = }""")
| 33 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    r"""
    Multiple `ControlNetModel` wrapper class for Multi-ControlNet.
    """

    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale,
        class_labels=None,
        timestep_cond=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        guess_mode=False,
        return_dict=True,
    ):
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states=encoder_hidden_states,
                controlnet_cond=image,
                conditioning_scale=scale,
                class_labels=class_labels,
                timestep_cond=timestep_cond,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
                guess_mode=guess_mode,
                return_dict=return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory,
        is_main_process=True,
        save_function=None,
        safe_serialization=False,
        variant=None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. "
                f"Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
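

# Hypothetical usage sketch (directory name is an assumption): saving two
# controlnets under `./multi` writes `./multi` and `./multi_1`, and
# `from_pretrained` rediscovers them by probing the suffixed directories.
#
#   nets = MultiControlNetModel([controlnet_a, controlnet_b])
#   nets.save_pretrained("./multi")
#   reloaded = MultiControlNetModel.from_pretrained("./multi")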
| 28 | 0 |
from math import ceil
def solution(n: int = 1001) -> int:
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
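

# Sanity check: the diagonals of a 5x5 spiral are 1, 3, 5, 7, 9, 13, 17, 21, 25,
# which sum to 101.
assert solution(5) == 101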
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number")
| 478 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    '''simple docstring'''
    model_input_names = ["audio_values", "audio_mask"]
def __init__( self, A=2_048, A=1, A=[16, 16], A=128, A=44_100, A=86, A=2_048, A=0.0, **A, ):
'''simple docstring'''
super().__init__(
feature_size=A, sampling_rate=A, padding_value=A, **A, )
SCREAMING_SNAKE_CASE : str = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = feature_size // self.patch_size[1]
SCREAMING_SNAKE_CASE : Dict = n_fft
SCREAMING_SNAKE_CASE : Tuple = sampling_rate // hop_length_to_sampling_rate
SCREAMING_SNAKE_CASE : str = sampling_rate
SCREAMING_SNAKE_CASE : int = padding_value
SCREAMING_SNAKE_CASE : Any = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2, num_mel_filters=A, min_frequency=0.0, max_frequency=2_20_50.0, sampling_rate=A, norm='slaney', mel_scale='slaney', ).T
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = spectrogram(
A, window_function(self.n_fft, 'hann' ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, )
SCREAMING_SNAKE_CASE : Union[str, Any] = log_spec[:, :-1]
SCREAMING_SNAKE_CASE : List[Any] = log_spec - 20.0
SCREAMING_SNAKE_CASE : Optional[Any] = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
return log_spec
def __call__( self, A, A = None, A = True, A = None, A = False, A = False, **A, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
F" with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
SCREAMING_SNAKE_CASE : List[Any] = isinstance(A, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(A, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A, np.ndarray ):
SCREAMING_SNAKE_CASE : Any = np.asarray(A, dtype=np.floataa )
elif isinstance(A, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
SCREAMING_SNAKE_CASE : int = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0], A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(A, dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
SCREAMING_SNAKE_CASE : Tuple = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
SCREAMING_SNAKE_CASE : List[Any] = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
SCREAMING_SNAKE_CASE : Tuple = np.array(A ).astype(np.floataa )
# convert into correct format for padding
SCREAMING_SNAKE_CASE : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
SCREAMING_SNAKE_CASE : Optional[Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
SCREAMING_SNAKE_CASE : Optional[int] = padded_audio_features * self.padding_value
for i in range(len(A ) ):
SCREAMING_SNAKE_CASE : Optional[int] = audio_features[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = feature
# return as BatchFeature
if return_attention_mask:
SCREAMING_SNAKE_CASE : Any = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
SCREAMING_SNAKE_CASE : Dict = {'audio_values': padded_audio_features}
SCREAMING_SNAKE_CASE : str = BatchFeature(data=A, tensor_type=A )
return encoded_inputs
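

# Hypothetical usage sketch (variable names are assumptions): a one-second
# mono waveform at 44.1 kHz comes back as padded log-mel "audio_values" plus
# an "audio_mask" marking the real (non-padding) patches.
#
#   extractor = TvltFeatureExtractor()
#   waveform = np.zeros(44_100, dtype=np.float32)
#   out = extractor(waveform, sampling_rate=44_100, return_attention_mask=True)
#   # out["audio_values"]: (1, 1, max_time_len, 128); out["audio_mask"]: (1, max_patch_len)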
| 28 | 0 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
def __init__( self : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : str=7 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : str=18 , lowerCamelCase_ : Optional[int]=30 , lowerCamelCase_ : Tuple=400 , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : Dict=None , lowerCamelCase_ : int=True , ):
"""simple docstring"""
UpperCamelCase = size if size is not None else {'height': 18, 'width': 18}
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = do_normalize
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
                [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = ImageGPTImageProcessingTester(self )
@property
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """clusters""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
UpperCamelCase = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase_ , obj[key] ) )
else:
self.assertEqual(obj[key] , lowerCamelCase_ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = os.path.join(lowerCamelCase_ , """image_processor.json""" )
image_processor_first.to_json_file(lowerCamelCase_ )
UpperCamelCase = self.image_processing_class.from_json_file(lowerCamelCase_ ).to_dict()
UpperCamelCase = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCamelCase_ )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowerCamelCase_ )
UpperCamelCase = self.image_processing_class.from_pretrained(lowerCamelCase_ ).to_dict()
UpperCamelCase = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCamelCase_ )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
pass
def prepare_images():
    '''simple docstring'''
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")
    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])
    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTIntegrationTest(unittest.TestCase):
@slow
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
UpperCamelCase = prepare_images()
# test non-batched
UpperCamelCase = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
UpperCamelCase = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCamelCase_ )
# test batched
UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
UpperCamelCase = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCamelCase_ )
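

# Illustrative sketch (independent of the tests above; `np` is already imported
# at the top of this file): ImageGPT-style color quantization maps each pixel
# to the id of its nearest cluster centroid.
def quantize_pixels(pixels: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    # pixels: (n, 3) normalized RGB values; clusters: (k, 3) centroids
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(axis=-1)  # (n, k)
    return distances.argmin(axis=1)  # one cluster id per pixel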
| 537 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    """simple docstring"""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list)
    for nodea, nodeb, cost in edges:
        adjacency[nodea].append([nodeb, cost])
        adjacency[nodeb].append([nodea, cost])
    result = mst(adjacency)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
assert edge in result or reverse in result
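

# For reference, the adjacency structure handed to `mst` maps each node to
# [neighbor, cost] pairs; with the first three edges above it contains
#   adjacency[0] == [[1, 4], [7, 8]] and adjacency[2] == [[1, 8]]
# before the remaining edges are added.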
| 28 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=384,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=128,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=20,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=30,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name."""
)
logger.info("""Training/evaluation parameters %s""", args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("""temp_engine"""):
os.makedirs("""temp_engine""")
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, """wb""") as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
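

# Note on the pattern above: the host-to-device copies, the kernel launch, and
# the device-to-host copies are all enqueued on a single CUDA stream, so one
# stream.synchronize() is enough to make h_output0/h_output1 valid on the host
# before timing stops.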
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
A_ = raw_datasets["""validation"""].column_names
A_ = """question""" if """question""" in column_names else column_names[0]
A_ = """context""" if """context""" in column_names else column_names[1]
A_ = """answers""" if """answers""" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
A_ = tokenizer.padding_side == """right"""
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
A_ = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some questions have lots of whitespace on the left, which would make the
    # truncation of the context fail, so we strip it.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
A_ = raw_datasets["""validation"""]
# Validation Feature Creation
eval_dataset = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="""Running tokenizer on validation dataset""",
)
data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__="eval" ):
lowerCamelCase_ = postprocess_qa_predictions(
examples=__UpperCamelCase ,features=__UpperCamelCase ,predictions=__UpperCamelCase ,version_2_with_negative=args.version_2_with_negative ,n_best_size=args.n_best_size ,max_answer_length=args.max_answer_length ,null_score_diff_threshold=args.null_score_diff_threshold ,output_dir=args.output_dir ,prefix=__UpperCamelCase ,)
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowerCamelCase_ = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
lowerCamelCase_ = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
lowerCamelCase_ = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=__UpperCamelCase ,label_ids=__UpperCamelCase )
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info("""***** Running Evaluation *****""")
logger.info(f" Num examples = {len(eval_dataset)}")
logger.info(f" Batch size = {args.per_device_eval_batch_size}")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None

    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1000 / niter))
logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1000))
logger.info("""Total Number of Inference = %d""", niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
| 29 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=A, )
SCREAMING_SNAKE_CASE : int = DDIMScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, )
SCREAMING_SNAKE_CASE : str = DDIMInverseScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_zero=A, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, )
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE : int = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 16, 16), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(A ) ).to(A )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0, 2, 3, 1 )[0]
SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : int = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Dict = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0, 2, 3, 1 )[0]
SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uinta(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Any = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
if not hasattr(self.pipeline_class, '_optional_components' ):
return
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(A, A, A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Dict = pipe(**A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(A )
SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(A )
pipe_loaded.to(A )
pipe_loaded.set_progress_bar_config(disable=A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(A, A ) is None, F"`{optional_component}` did not stay set to None after loading.", )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**A )[0]
SCREAMING_SNAKE_CASE : List[str] = np.abs(output - output_loaded ).max()
self.assertLess(A, 1E-4 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 'cpu'
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = self.get_dummy_mask_inputs(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.generate_mask(**A )
SCREAMING_SNAKE_CASE : Dict = mask[0, -3:, -3:]
self.assertEqual(mask.shape, (1, 16, 16) )
SCREAMING_SNAKE_CASE : Any = np.array([0] * 9 )
SCREAMING_SNAKE_CASE : Any = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
self.assertEqual(mask[0, -3, -4], 0 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
        SCREAMING_SNAKE_CASE : Tuple = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799], )
SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler(**A )
SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepInverseScheduler(**A )
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : List[str] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
        SCREAMING_SNAKE_CASE : Tuple = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799], )
SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
SCREAMING_SNAKE_CASE : Optional[int] = raw_image.convert('RGB' ).resize((768, 768) )
SCREAMING_SNAKE_CASE : List[str] = raw_image
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : int = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[Any] = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : List[str] = 'a bowl of pears'
SCREAMING_SNAKE_CASE : Dict = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A ).latents
SCREAMING_SNAKE_CASE : List[str] = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : List[Any] = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : Tuple = 'a bowl of pears'
SCREAMING_SNAKE_CASE : List[Any] = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A, num_inference_steps=25, ).latents
SCREAMING_SNAKE_CASE : str = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : Tuple = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
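

# The three-stage DiffEdit flow exercised above, in call order:
#   mask = pipe.generate_mask(image=raw_image, source_prompt=source, target_prompt=target)
#   latents = pipe.invert(prompt=source, image=raw_image, inpaint_strength=0.7).latents
#   edited = pipe(prompt=target, mask_image=mask, image_latents=latents).images[0]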
| 28 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A = {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()["__file__"], _import_structure)
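
# Usage note: _LazyModule defers the heavy torch/vision imports until an
# attribute (e.g. EfficientNetModel) is first accessed, so importing this
# module stays cheap.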
| 475 |
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    """simple docstring"""
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
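    # Sanity check (small limit): phi(2) + ... + phi(8) = 21.
    assert solution(8) == 21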
| 28 | 0 |
def solution(limit: int = 1000) -> int:
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 392 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : Any = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(A, range(len(A ) ) ) )
SCREAMING_SNAKE_CASE : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(A ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(A ) )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'lower newer'
SCREAMING_SNAKE_CASE : Union[str, Any] = 'lower newer'
return input_text, output_text
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map )
SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer'
SCREAMING_SNAKE_CASE : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A ) # , add_prefix_space=True)
self.assertListEqual(A, A )
SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A )
SCREAMING_SNAKE_CASE : int = tokenizer.encode(
'sequence builders', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(
'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.'
SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A, A )
SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A, A )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A, A )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE : Optional[int] = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A )
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence'
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence'
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Tuple = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A, A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), )
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ):
SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['trim_offsets'], A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}"
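                # The four tokenizer configurations below exercise the
                # add_prefix_space / trim_offsets combinations: with
                # trim_offsets=True the separating space is excluded from the
                # second token's offset span (start = len + 1), while with
                # trim_offsets=False it is included (start = len).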
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
| 28 | 0 |
a =[
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
__lowerCamelCase : Union[str, Any] = [False] * len(__UpperCamelCase )
__lowerCamelCase : Optional[int] = [s]
__lowerCamelCase : Optional[Any] = True
while queue:
__lowerCamelCase : Tuple = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__UpperCamelCase )
__lowerCamelCase : int = True
__lowerCamelCase : Any = u
return visited[t]
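# The BFS above is the augmenting-path search of the Edmonds-Karp variant of
# Ford-Fulkerson: it fills parent[] so the min-cut routine below can walk the
# path from sink back to source and update the residual capacities.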
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
__lowerCamelCase : Any = [-1] * (len(__UpperCamelCase ))
__lowerCamelCase : int = 0
__lowerCamelCase : Dict = []
    __lowerCamelCase : List[Any] = [i[:] for i in graph]  # Keep a copy of the original capacities so the cut edges can be found later.
while bfs(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__lowerCamelCase : str = float('Inf' )
__lowerCamelCase : str = sink
while s != source:
            # Find the minimum residual capacity along the selected path
__lowerCamelCase : List[str] = min(__UpperCamelCase , graph[parent[s]][s] )
__lowerCamelCase : Optional[Any] = parent[s]
max_flow += path_flow
__lowerCamelCase : Union[str, Any] = sink
while v != source:
__lowerCamelCase : Any = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__lowerCamelCase : Dict = parent[v]
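    # Collect edges that became fully saturated in the residual graph (capacity
    # dropped to zero while the original capacity was positive); for this test
    # graph these are exactly the minimum-cut edges.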
for i in range(len(__UpperCamelCase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 652 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Union[str, Any] = StableDiffusionXLImgaImgPipeline
A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
A : str = PipelineTesterMixin.required_optional_params - {'''latents'''}
A : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
A : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=A, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
SCREAMING_SNAKE_CASE : str = EulerDiscreteScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading', )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=32, )
SCREAMING_SNAKE_CASE : int = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A )
SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModelWithProjection(A )
SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A )
SCREAMING_SNAKE_CASE : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : str = image / 2 + 0.5
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionXLImgaImgPipeline(**A )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Any = sd_pipe(**A ).images
SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionXLImgaImgPipeline(**A )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(A )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
# forward without prompt embeds
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt
SCREAMING_SNAKE_CASE : Optional[int] = 3 * [inputs['prompt']]
SCREAMING_SNAKE_CASE : int = sd_pipe(**A )
SCREAMING_SNAKE_CASE : List[Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : str = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : int = 3 * [inputs.pop('prompt' )]
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.encode_prompt(A, negative_prompt=A )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(
**A, prompt_embeds=A, negative_prompt_embeds=A, pooled_prompt_embeds=A, negative_pooled_prompt_embeds=A, )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self, A, A="cpu", A=torch.floataa, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Optional[Any] = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE : str = torch.from_numpy(A ).to(device=A, dtype=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs(A )
SCREAMING_SNAKE_CASE : str = pipe(**A ).images
SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Dict = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 28 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase_ ( A , unittest.TestCase ):
__lowerCamelCase = LongformerTokenizer
__lowerCamelCase = True
__lowerCamelCase = LongformerTokenizerFast
__lowerCamelCase = True
def _snake_case ( self ) -> Optional[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE_ : Any =[
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
SCREAMING_SNAKE_CASE_ : Optional[Any] =dict(zip(__A , range(len(__A ) ) ) )
SCREAMING_SNAKE_CASE_ : str =['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
SCREAMING_SNAKE_CASE_ : Tuple ={'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__A ) )
def _snake_case ( self , **__A ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__A )
def _snake_case ( self , **__A ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__A )
def _snake_case ( self , __A ) -> Dict:
SCREAMING_SNAKE_CASE_ : int ='lower newer'
SCREAMING_SNAKE_CASE_ : Union[str, Any] ='lower newer'
return input_text, output_text
def _snake_case ( self ) -> str:
SCREAMING_SNAKE_CASE_ : List[Any] =self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE_ : Optional[Any] ='lower newer'
SCREAMING_SNAKE_CASE_ : List[str] =['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
SCREAMING_SNAKE_CASE_ : List[Any] =tokenizer.tokenize(__A ) # , add_prefix_space=True)
self.assertListEqual(__A , __A )
SCREAMING_SNAKE_CASE_ : List[Any] =tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : Union[str, Any] =[0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
def _snake_case ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ : List[Any] =self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__A ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__A ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def _snake_case ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ : str =self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
SCREAMING_SNAKE_CASE_ : Tuple =tokenizer.encode('''sequence builders''' , add_special_tokens=__A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =tokenizer.encode('''multi-sequence build''' , add_special_tokens=__A )
SCREAMING_SNAKE_CASE_ : int =tokenizer.encode(
'''sequence builders''' , add_special_tokens=__A , add_prefix_space=__A )
SCREAMING_SNAKE_CASE_ : List[Any] =tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__A , add_prefix_space=__A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =tokenizer.build_inputs_with_special_tokens(__A )
SCREAMING_SNAKE_CASE_ : Optional[Any] =tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _snake_case ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ : Optional[int] =self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[int] ='Encode this sequence.'
SCREAMING_SNAKE_CASE_ : List[str] =tokenizer.byte_encoder[' '.encode('''utf-8''' )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE_ : List[str] =tokenizer.encode(__A , add_special_tokens=__A , add_prefix_space=__A )
SCREAMING_SNAKE_CASE_ : Dict =tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__A , __A )
SCREAMING_SNAKE_CASE_ : str =tokenizer.encode(__A , add_special_tokens=__A , add_prefix_space=__A )
SCREAMING_SNAKE_CASE_ : str =tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__A , __A )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
SCREAMING_SNAKE_CASE_ : List[str] =tokenizer.encode(__A , add_special_tokens=__A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__A , __A )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE_ : Optional[int] ='<mask>'
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__A , lstrip=__A , rstrip=__A )} ) # mask token has a left space
SCREAMING_SNAKE_CASE_ : List[Any] =tokenizer.convert_tokens_to_ids(__A )
SCREAMING_SNAKE_CASE_ : List[str] ='Encode <mask> sequence'
SCREAMING_SNAKE_CASE_ : List[str] ='Encode <mask>sequence'
SCREAMING_SNAKE_CASE_ : List[Any] =tokenizer.encode(__A )
SCREAMING_SNAKE_CASE_ : Tuple =encoded.index(__A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__A , __A )
SCREAMING_SNAKE_CASE_ : Tuple =tokenizer.encode(__A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =encoded.index(__A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__A , __A )
def _snake_case ( self ) -> Union[str, Any]:
pass
def _snake_case ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[int] =self.rust_tokenizer_class.from_pretrained(__A , **__A )
SCREAMING_SNAKE_CASE_ : Tuple =self.tokenizer_class.from_pretrained(__A , **__A )
SCREAMING_SNAKE_CASE_ : Optional[Any] ='A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE_ : Any =tokenizer_r.encode_plus(__A , add_special_tokens=__A , return_token_type_ids=__A )
SCREAMING_SNAKE_CASE_ : Optional[Any] =tokenizer_p.encode_plus(__A , add_special_tokens=__A , return_token_type_ids=__A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
SCREAMING_SNAKE_CASE_ : Dict =tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
SCREAMING_SNAKE_CASE_ : List[str] =tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
__A , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__A , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def _snake_case ( self ) -> Any:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
SCREAMING_SNAKE_CASE_ : List[Any] =self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
SCREAMING_SNAKE_CASE_ : Tuple =json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE_ : Optional[Any] =json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __A )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __A )
self.assertEqual(post_processor_state['''trim_offsets'''] , __A )
def _snake_case ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : str ='hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE_ : Tuple =F'{text_of_1_token} {text_of_1_token}'
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
SCREAMING_SNAKE_CASE_ : Tuple =tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ) + 1, len(__A ) + 1 + len(__A )) , )
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
SCREAMING_SNAKE_CASE_ : List[Any] =tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ) + 1, len(__A ) + 1 + len(__A )) , )
SCREAMING_SNAKE_CASE_ : List[str] =self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
SCREAMING_SNAKE_CASE_ : Optional[Any] =tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ), len(__A ) + 1 + len(__A )) , )
SCREAMING_SNAKE_CASE_ : Any =self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ), len(__A ) + 1 + len(__A )) , )
SCREAMING_SNAKE_CASE_ : Any =F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE_ : str =self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
SCREAMING_SNAKE_CASE_ : List[str] =tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__A ) + 1, 1 + len(__A ) + 1 + len(__A )) , )
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
SCREAMING_SNAKE_CASE_ : str =tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__A ), 1 + len(__A ) + 1 + len(__A )) , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
SCREAMING_SNAKE_CASE_ : List[Any] =tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__A ), 1 + len(__A ) + 1 + len(__A )) , )
| 443 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Dict = '''char'''
A : Any = '''bpe'''
A : Dict = '''wp'''
UpperCamelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = ['''image_processor''', '''char_tokenizer''']
A : int = '''ViTImageProcessor'''
A : List[str] = '''MgpstrTokenizer'''
def __init__( self, A=None, A=None, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', A, )
SCREAMING_SNAKE_CASE : str = kwargs.pop('feature_extractor' )
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer
SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('gpt2' )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(A, A )
def __call__( self, A=None, A=None, A=None, **A ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(A, return_tensors=A, **A )
if text is not None:
SCREAMING_SNAKE_CASE : int = self.char_tokenizer(A, return_tensors=A, **A )
if text is None:
return inputs
elif images is None:
return encodings
else:
SCREAMING_SNAKE_CASE : Any = encodings['input_ids']
return inputs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = sequences
SCREAMING_SNAKE_CASE : List[str] = char_preds.size(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self._decode_helper(A, 'char' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._decode_helper(A, 'bpe' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._decode_helper(A, 'wp' )
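        # Each image is decoded by all three heads (character, BPE, WordPiece);
        # the final string for an image is the one from the head that reports
        # the highest confidence score.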
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : Tuple = []
for i in range(A ):
SCREAMING_SNAKE_CASE : str = [char_scores[i], bpe_scores[i], wp_scores[i]]
SCREAMING_SNAKE_CASE : Dict = [char_strs[i], bpe_strs[i], wp_strs[i]]
SCREAMING_SNAKE_CASE : List[str] = scores.index(max(A ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
SCREAMING_SNAKE_CASE : List[Any] = {}
SCREAMING_SNAKE_CASE : int = final_strs
SCREAMING_SNAKE_CASE : Any = final_scores
SCREAMING_SNAKE_CASE : Dict = char_strs
SCREAMING_SNAKE_CASE : Any = bpe_strs
SCREAMING_SNAKE_CASE : Union[str, Any] = wp_strs
return out
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
SCREAMING_SNAKE_CASE : List[Any] = self.char_decode
SCREAMING_SNAKE_CASE : Optional[int] = 1
SCREAMING_SNAKE_CASE : str = '[s]'
elif format == DecodeType.BPE:
SCREAMING_SNAKE_CASE : str = self.bpe_decode
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : List[str] = '#'
elif format == DecodeType.WORDPIECE:
SCREAMING_SNAKE_CASE : Any = self.wp_decode
SCREAMING_SNAKE_CASE : Tuple = 102
SCREAMING_SNAKE_CASE : List[Any] = '[SEP]'
else:
raise ValueError(F"Format {format} is not supported." )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = [], []
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_logits.size(0 )
SCREAMING_SNAKE_CASE : Any = pred_logits.size(1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = pred_logits.topk(1, dim=-1, largest=A, sorted=A )
SCREAMING_SNAKE_CASE : Optional[int] = preds_index.view(-1, A )[:, 1:]
SCREAMING_SNAKE_CASE : List[Any] = decoder(A )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.softmax(A, dim=2 ).max(dim=2 )
SCREAMING_SNAKE_CASE : Dict = preds_max_prob[:, 1:]
for index in range(A ):
SCREAMING_SNAKE_CASE : Optional[int] = preds_str[index].find(A )
SCREAMING_SNAKE_CASE : List[Any] = preds_str[index][:pred_eos]
SCREAMING_SNAKE_CASE : Dict = preds_index[index].cpu().tolist()
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_index.index(A ) if eos_token in pred_index else -1
SCREAMING_SNAKE_CASE : Optional[int] = preds_max_prob[index][: pred_eos_index + 1]
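            # The sequence confidence is the product of the per-step maximum
            # probabilities up to and including the EOS position.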
SCREAMING_SNAKE_CASE : Optional[int] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(A )
conf_scores.append(A )
return dec_strs, conf_scores
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [seq.replace(' ', '' ) for seq in self.char_tokenizer.batch_decode(A )]
return decode_strs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [seq.replace(' ', '' ) for seq in self.wp_tokenizer.batch_decode(A )]
return decode_strs
| 28 | 0 |
from __future__ import annotations
def lowercase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
'''simple docstring'''
__lowercase = len(__UpperCamelCase )
    # If row is equal to the size of the board, it means there is a queen in each
    # row of the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
    # We iterate over each column in the row to find every valid placement in this row
for col in range(__UpperCamelCase ):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain this column value, because if it
        # does there is a vertical collision. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # Then we verify that the results of these two formulas do not already exist
        # in their respective sets (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we continue
        # to the next value in the for loop.
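        # Example: queens at (0, 1) and (2, 3) both give row - col = -1, so they
        # share a 45º diagonal; queens at (0, 3) and (1, 2) both give
        # row + col = 3, so they share a 135º diagonal.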
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # If none of the collision checks triggered, we call the dfs function again with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , __UpperCamelCase , __UpperCamelCase , )
def lowercase_ ( _UpperCamelCase ):
'''simple docstring'''
__lowercase = []
depth_first_search([] , [] , [] , __UpperCamelCase , __UpperCamelCase )
# Print all the boards
for board in boards:
for column in board:
print(__UpperCamelCase )
print('''''' )
print(len(__UpperCamelCase ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
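# The 4-queens puzzle has exactly two solutions, e.g.:
# boards = []
# depth_first_search([], [], [], boards, 4)
# assert len(boards) == 2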
| 639 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger("transformers.models.speecht5")
def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: Any ):
"""simple docstring"""
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE : Any = checkpoint['input_conv.weight_g']
SCREAMING_SNAKE_CASE : List[Any] = checkpoint['input_conv.weight_v']
SCREAMING_SNAKE_CASE : str = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE : Optional[int] = checkpoint[f"upsamples.{i}.1.weight_g"]
SCREAMING_SNAKE_CASE : Dict = checkpoint[f"upsamples.{i}.1.weight_v"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE : int = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
SCREAMING_SNAKE_CASE : str = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
SCREAMING_SNAKE_CASE : Dict = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
SCREAMING_SNAKE_CASE : Tuple = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
SCREAMING_SNAKE_CASE : Optional[Any] = checkpoint['output_conv.1.weight_g']
SCREAMING_SNAKE_CASE : List[Any] = checkpoint['output_conv.1.weight_v']
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: int ,__UpperCamelCase: Any ,__UpperCamelCase: str=None ,__UpperCamelCase: Tuple=None ,):
"""simple docstring"""
if config_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = SpeechTaHifiGanConfig.from_pretrained(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE : Optional[int] = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE : Optional[Any] = SpeechTaHifiGan(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(__UpperCamelCase )
load_weights(orig_checkpoint['model']['generator'] ,__UpperCamelCase ,__UpperCamelCase )
SCREAMING_SNAKE_CASE : int = np.load(__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE : Tuple = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(__UpperCamelCase ).float()
SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(__UpperCamelCase ).float()
model.save_pretrained(__UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
UpperCamelCase_ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 28 | 0 |
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
UpperCAmelCase__ : Any = logging.get_logger(__name__)
def A ( UpperCamelCase_ : bool , UpperCamelCase_ : bool ) -> str:
'''simple docstring'''
def run_func(UpperCamelCase_ : Dict ):
@wraps(__UpperCamelCase )
def run_in_eager_mode(*UpperCamelCase_ : List[str] , **UpperCamelCase_ : Union[str, Any] ):
return func(*__UpperCamelCase , **__UpperCamelCase )
@wraps(__UpperCamelCase )
@tf.function(experimental_compile=__UpperCamelCase )
def run_in_graph_mode(*UpperCamelCase_ : str , **UpperCamelCase_ : Union[str, Any] ):
return func(*__UpperCamelCase , **__UpperCamelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
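# At wrap time the factory above decides whether the benchmarked callable runs
# eagerly or as a tf.function graph (optionally XLA-compiled); the actual graph
# tracing then happens on the callable's first invocation.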
def A ( UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> int:
'''simple docstring'''
lowerCAmelCase__ = random.Random()
lowerCAmelCase__ = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(__UpperCamelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
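# The helper above builds a (batch_size, sequence_length) tensor of uniformly
# sampled token ids, used as dummy model input by the benchmarks below.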
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :TensorFlowBenchmarkArguments
snake_case__ :PretrainedConfig
snake_case__ :str = "TensorFlow"
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
return tf.__version__
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
lowerCAmelCase__ = self._prepare_inference_func(__magic_name__ , __magic_name__ , __magic_name__ )
return self._measure_speed(_inference )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[str] , __magic_name__ : str , __magic_name__ : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
lowerCAmelCase__ = self._prepare_train_func(__magic_name__ , __magic_name__ , __magic_name__ )
return self._measure_speed(_train )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : Any ):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __magic_name__ )
lowerCAmelCase__ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
lowerCAmelCase__ = self._prepare_inference_func(__magic_name__ , __magic_name__ , __magic_name__ )
return self._measure_memory(_inference )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : int ):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __magic_name__ )
lowerCAmelCase__ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
lowerCAmelCase__ = self._prepare_train_func(__magic_name__ , __magic_name__ , __magic_name__ )
return self._measure_memory(_train )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Any , __magic_name__ : int , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
lowerCAmelCase__ = (
hasattr(__magic_name__ , "architectures" )
and isinstance(config.architectures , __magic_name__ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowerCAmelCase__ = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
lowerCAmelCase__ = __import__("transformers" , fromlist=[model_class] )
lowerCAmelCase__ = getattr(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = model_cls(__magic_name__ )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
lowerCAmelCase__ = TF_MODEL_MAPPING[config.__class__](__magic_name__ )
# encoder-decoder has vocab size saved differently
lowerCAmelCase__ = config.vocab_size if hasattr(__magic_name__ , "vocab_size" ) else config.encoder.vocab_size
lowerCAmelCase__ = random_input_ids(__magic_name__ , __magic_name__ , __magic_name__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__magic_name__ , decoder_input_ids=__magic_name__ , training=__magic_name__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
lowerCAmelCase__ = (
hasattr(__magic_name__ , "architectures" )
and isinstance(config.architectures , __magic_name__ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowerCAmelCase__ = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
lowerCAmelCase__ = __import__("transformers" , fromlist=[model_class] )
lowerCAmelCase__ = getattr(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = model_cls(__magic_name__ )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
lowerCAmelCase__ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__magic_name__ )
# encoder-decoder has vocab size saved differently
lowerCAmelCase__ = config.vocab_size if hasattr(__magic_name__ , "vocab_size" ) else config.encoder.vocab_size
lowerCAmelCase__ = random_input_ids(__magic_name__ , __magic_name__ , __magic_name__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
lowerCAmelCase__ = model(__magic_name__ , decoder_input_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ )[0]
lowerCAmelCase__ = tf.gradients(__magic_name__ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
lowerCAmelCase__ = model(__magic_name__ , labels=__magic_name__ , training=__magic_name__ )[0]
lowerCAmelCase__ = tf.gradients(__magic_name__ , model.trainable_variables )
return gradients
lowerCAmelCase__ = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Optional[Any] ):
"""simple docstring"""
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run an additional 5 times to stabilize compilation for TPU
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(__magic_name__ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
lowerCAmelCase__ = timeit.repeat(
__magic_name__ , repeat=self.args.repeat , number=10 , )
return min(__magic_name__ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : str ):
"""simple docstring"""
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
lowerCAmelCase__ = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU." )
lowerCAmelCase__ = 'N/A'
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
lowerCAmelCase__ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
lowerCAmelCase__ = nvml.nvmlDeviceGetMemoryInfo(__magic_name__ )
lowerCAmelCase__ = meminfo.used
lowerCAmelCase__ = Memory(__magic_name__ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
lowerCAmelCase__ = None
else:
lowerCAmelCase__ = measure_peak_memory_cpu(__magic_name__ )
lowerCAmelCase__ = Memory(__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else memory_bytes
if self.args.trace_memory_line_by_line:
lowerCAmelCase__ = stop_memory_tracing(__magic_name__ )
if memory is None:
lowerCAmelCase__ = summary.total
else:
lowerCAmelCase__ = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
return "N/A", None
| 48 |
'''simple docstring'''
from typing import Any
class _a :
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = data
SCREAMING_SNAKE_CASE : Any = None
def __repr__( self ):
'''simple docstring'''
return F"Node({self.data})"
class _a :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = None
def __iter__( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.head
while node:
yield node.data
SCREAMING_SNAKE_CASE : List[str] = node.next
def __len__( self ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self ):
'''simple docstring'''
return "->".join([str(A ) for item in self] )
def __getitem__( self, A ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self, A, A ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
SCREAMING_SNAKE_CASE : Optional[Any] = self.head
for _ in range(A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = current.next
SCREAMING_SNAKE_CASE : Any = data
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
self.insert_nth(len(self ), A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
self.insert_nth(0, A )
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
SCREAMING_SNAKE_CASE : Union[str, Any] = Node(A )
if self.head is None:
SCREAMING_SNAKE_CASE : Optional[int] = new_node
elif index == 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # link new_node to head
SCREAMING_SNAKE_CASE : Tuple = new_node
else:
SCREAMING_SNAKE_CASE : Optional[int] = self.head
for _ in range(index - 1 ):
SCREAMING_SNAKE_CASE : str = temp.next
SCREAMING_SNAKE_CASE : Union[str, Any] = temp.next
SCREAMING_SNAKE_CASE : List[str] = new_node
    def UpperCamelCase_ ( self ): # print the data of every node
'''simple docstring'''
print(self )
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.delete_nth(0 )
def UpperCamelCase_ ( self ): # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def UpperCamelCase_ ( self, A = 0 ):
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.head # default first node
if index == 0:
SCREAMING_SNAKE_CASE : List[str] = self.head.next
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.head
for _ in range(index - 1 ):
SCREAMING_SNAKE_CASE : Any = temp.next
SCREAMING_SNAKE_CASE : List[str] = temp.next
SCREAMING_SNAKE_CASE : Optional[int] = temp.next.next
return delete_node.data
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.head is None
def UpperCamelCase_ ( self ):
'''simple docstring'''
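        # In-place reversal: O(n) time and O(1) extra space; a single pass that
        # re-points each node's `next` to its predecessor.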
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : Any = self.head
while current:
# Store the current node's next node.
SCREAMING_SNAKE_CASE : Optional[int] = current.next
# Make the current node's next point backwards
SCREAMING_SNAKE_CASE : int = prev
# Make the previous node be the current node
SCREAMING_SNAKE_CASE : int = current
# Make the current node the next node (to progress iteration)
SCREAMING_SNAKE_CASE : List[Any] = next_node
# Return prev in order to put the head at the end
SCREAMING_SNAKE_CASE : List[Any] = prev
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = LinkedList()
assert linked_list.is_empty() is True
assert str(__UpperCamelCase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(__UpperCamelCase ) == i
linked_list.insert_nth(__UpperCamelCase ,i + 1 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(0 ,12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(__UpperCamelCase ) == 9
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True
for i in range(0 ,9 ):
SCREAMING_SNAKE_CASE : Any = -i
assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True
linked_list.reverse()
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(-8 ,1 ) )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-1_9_2.5_5_5_5_5,
'Hello, world!',
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
SCREAMING_SNAKE_CASE : Optional[int] = LinkedList()
for i in test_input:
linked_list.insert_tail(__UpperCamelCase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__UpperCamelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
SCREAMING_SNAKE_CASE : str = linked_list.delete_head()
assert result == -9
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
SCREAMING_SNAKE_CASE : Dict = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
SCREAMING_SNAKE_CASE : str = linked_list.delete_nth(10 )
assert result is None
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(__UpperCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(__UpperCamelCase )
assert (
str(__UpperCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(__UpperCamelCase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
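# NOTE (editor): the tests above rely on `Node` and `LinkedList` defined earlier in
# this file, outside this excerpt. A minimal sketch of the API they assume -- the
# bodies here are illustrative assumptions, not the original implementation:
#
#     class Node:
#         def __init__(self, data):
#             self.data, self.next = data, None
#
#         def __repr__(self):  # the tests expect the form "Node(<data>)"
#             return f"Node({self.data})"
#
#     class LinkedList:
#         ...  # supports insert_head/insert_tail, delete_head/delete_tail/delete_nth,
#              # reverse, is_empty, len(), "a->b->c"-style str(), and index get/set.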
| 28 | 0 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extraction specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))

    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )

    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))

    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)

    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask(self):
        feat_extract_dict = self.feat_extract_dict
        feat_extract_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation(self):
        feat_extract_dict = self.feat_extract_dict
        feat_extract_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
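# NOTE (editor): a minimal, self-contained sketch of the `pad_to_multiple_of` length
# arithmetic asserted in `_check_padding`/`_check_truncation` above. The helper name
# is ours, not part of transformers:
def _round_up_to_multiple(length: int, multiple: int) -> int:
    """Smallest multiple of `multiple` that is >= `length`."""
    if length % multiple == 0:
        return length
    return (length // multiple + 1) * multiple


assert _round_up_to_multiple(37, 10) == 40
assert _round_up_to_multiple(40, 10) == 40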
| 604 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to the image
        processor, assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
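    # NOTE (editor): worked example of the resize math above (values invented):
    # with size["shortest_edge"] == 18 and a 30x40 (w x h) PIL image, w < h, so
    # expected_width = 18 and expected_height = int(18 * 40 / 30) = 24; batched
    # inputs take the per-dimension maximum over all images.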
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
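# NOTE (editor): shape of the COCO-style detection target consumed above, as an
# illustrative literal (all values invented; "bbox" is [top-left x, top-left y,
# width, height] in the COCO convention):
_example_coco_detection_target = {
    "image_id": 39769,
    "annotations": [
        {"bbox": [100.0, 50.0, 30.0, 40.0], "category_id": 17, "area": 1200.0, "iscrowd": 0},
    ],
}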
| 28 | 0 |
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """
    Feature extraction pipeline: extracts the hidden states from the base transformer,
    which can then be used as features in downstream tasks.
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first available tensor (e.g. last_hidden_state)
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
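# NOTE (editor): minimal usage sketch (doc comment only -- running it downloads a
# checkpoint; the model name is illustrative):
#
#     from transformers import pipeline
#
#     extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#     features = extractor("This is a test.")
#     # features is a nested list shaped [batch, sequence_length, hidden_size]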
| 33 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)


DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """
    Interleave several datasets (sources) into a single dataset, alternating between
    the sources to get examples. See the usage sketch after `concatenate_datasets` below.
    """
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """
    Converts a list of `Dataset` objects with the same schema into a single `Dataset`.
    """
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
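# NOTE (editor): usage sketch for the two public helpers above (doc comment only --
# this module uses relative imports, so it cannot be run standalone; the toy data
# is invented):
#
#     from datasets import Dataset, concatenate_datasets, interleave_datasets
#
#     d1 = Dataset.from_dict({"a": [0, 1, 2]})
#     d2 = Dataset.from_dict({"a": [10, 11, 12]})
#     interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)  # samples rows from each source
#     concatenate_datasets([d1, d2])  # rows of d1 followed by rows of d2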
| 28 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
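# NOTE (editor): the 5-D video batch layout asserted throughout these tests, shown
# with plain numpy (np is imported at the top of this file; the values mirror the
# tester defaults above):
_batch, _frames, _channels, _h, _w = 7, 10, 3, 18, 18
_dummy_encoded_videos = np.zeros((_batch, _frames, _channels, _h, _w))
assert _dummy_encoded_videos.shape == (7, 10, 3, 18, 18)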
| 478 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MobileViT
    does not use input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
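# NOTE (editor): quick illustration of the hidden-state geometry checked in
# test_hidden_states_output above (pure Python; the numbers match the tester
# defaults: image_size=32, five stages, output_stride=32):
_image_size, _divisor, _sizes = 32, 2, []
for _ in range(5):  # five stages, each halving the spatial resolution
    _sizes.append(_image_size // _divisor)
    _divisor *= 2
assert _sizes == [16, 8, 4, 2, 1] and _divisor // 2 == 32  # final divisor equals output_stride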
| 28 | 0 |
import itertools
import math
def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Generate a sequence of prime numbers."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Returns the n-th prime number.

    >>> solution(6)
    13
    """
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
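# NOTE (editor): quick sanity checks for the helpers above (cheap enough to run at
# import time; values are verifiable by hand):
assert is_prime(13) is True
assert is_prime(15) is False
assert solution(6) == 13  # the sixth prime is 13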
| 537 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" DistilBERT tokenizer, backed by HuggingFace's `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its saved state disagrees with the
        # arguments passed to this tokenizer.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS] and [SEP] tokens around a sequence or a pair of sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Create token type IDs: zeros for the first sequence, ones for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
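

# Minimal usage sketch (added for illustration; not part of the original file).
# It assumes network access to the public "distilbert-base-uncased" checkpoint:
#
#     tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#     encoded = tokenizer("Hello world", return_tensors="pt")
#     print(encoded["input_ids"], encoded["attention_mask"])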
| 28 | 0 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
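

# Conceptual sketch (added for illustration): ``post_process_semantic_segmentation``
# essentially upsamples the logits to each target size and takes the argmax over
# the class dimension, e.g.:
#
#     import torch.nn.functional as F
#     upsampled = F.interpolate(outputs.logits, size=(500, 300), mode="bilinear", align_corners=False)
#     label_map = upsampled.argmax(dim=1)[0]  # (500, 300) map of class ids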
| 29 |
"""Tests for image processor utilities: offline caching, Hub push, and dynamic (remote-code) processors."""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
UpperCamelCase_ = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
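

# Usage sketch (added): repos that ship their own processor class, like the
# dynamic test above, must be loaded with remote code enabled. The repo name
# below is hypothetical:
#
#     processor = AutoImageProcessor.from_pretrained("user/repo-with-custom-processor", trust_remote_code=True)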
| 28 | 0 |
def factorial(num: int) -> int:
    """Return ``num`` factorial."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of ``number``."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of ``num``! (Project Euler problem 20)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
| 475 |
class Node:
    """A node of a binary search tree (BST)."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """Recursive in-order traversal that appends the visited values to ``res``."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort ``arr`` by building a BST and traversing it in order."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
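    # Added illustrative check: the BST-based sort is insertion-order-insensitive
    # (for inputs without duplicates, which this implementation silently drops).
    assert tree_sort([1, 2, 3]) == tree_sort([3, 2, 1]) == [1, 2, 3]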
| 28 | 0 |
import datasets
from .evaluate import evaluate
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
 title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
 author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
 booktitle={EMNLP},
 year={2016}
}
"""

_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).

Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""

_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': the text of the answer
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the SQuAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
Examples:

    >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
    >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
    >>> squad_metric = datasets.load_metric("squad")
    >>> results = squad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
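

# Minimal usage sketch (added; it mirrors the example already given in
# ``_KWARGS_DESCRIPTION`` above):
#
#     import datasets
#     squad_metric = datasets.load_metric("squad")
#     predictions = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
#     references = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
#     print(squad_metric.compute(predictions=predictions, references=references))
#     # {'exact_match': 100.0, 'f1': 100.0}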
| 392 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    """Warn about (or, past the removal version, error on) deprecated arguments and attributes."""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
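

# Usage sketch (added; ``my_function`` is a hypothetical caller). A deprecated
# keyword is popped from ``kwargs`` with a FutureWarning until the stated
# version, after which the deprecation tuple itself raises a ValueError:
#
#     def my_function(new_arg=None, **kwargs):
#         old_arg = deprecate("old_arg", "1.0.0", "Use `new_arg` instead.", take_from=kwargs)
#         return old_arg if old_arg is not None else new_arg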
| 28 | 0 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
a ="""\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"""
class A_ ( unittest.TestCase , SCREAMING_SNAKE_CASE ):
def lowerCAmelCase ( self : str):
__lowerCamelCase : Dict = load_tool('text-question-answering')
self.tool.setup()
__lowerCamelCase : Optional[Any] = load_tool('text-question-answering' ,remote=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : Union[str, Any] = self.tool(SCREAMING_SNAKE_CASE__ ,'What did Hugging Face do in April 2021?')
self.assertEqual(SCREAMING_SNAKE_CASE__ ,'launched the BigScience Research Workshop')
def lowerCAmelCase ( self : List[str]):
__lowerCamelCase : str = self.remote_tool(SCREAMING_SNAKE_CASE__ ,'What did Hugging Face do in April 2021?')
self.assertEqual(SCREAMING_SNAKE_CASE__ ,'launched the BigScience Research Workshop')
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : Tuple = self.tool(text=SCREAMING_SNAKE_CASE__ ,question='What did Hugging Face do in April 2021?')
self.assertEqual(SCREAMING_SNAKE_CASE__ ,'launched the BigScience Research Workshop')
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : Optional[int] = self.remote_tool(text=SCREAMING_SNAKE_CASE__ ,question='What did Hugging Face do in April 2021?')
self.assertEqual(SCREAMING_SNAKE_CASE__ ,'launched the BigScience Research Workshop')
| 652 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 | 0 |
from collections import deque


def tarjan(g: list) -> list:
    """
    Tarjan's algorithm for finding the strongly connected components of a
    directed graph, given as an adjacency list ``g``.
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n: int, edges: list) -> list:
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
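    # Added illustrative check: a two-node cycle plus a sink vertex. Vertices 0
    # and 1 reach each other, so they collapse into one component; 2 stands alone.
    assert tarjan(create_graph(3, [(0, 1), (1, 0), (1, 2)])) == [[2], [1, 0]]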
| 443 |
def get_highest_set_bit_position(number: int) -> int:
    """
    Return the 1-indexed position of the most significant set bit of ``number``.

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(37)
    6
    >>> get_highest_set_bit_position(1)
    1
    >>> get_highest_set_bit_position(4)
    3
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
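    # Added illustrative checks: 8 = 0b1000 -> position 4; 0 has no set bit.
    assert get_highest_set_bit_position(8) == 4
    assert get_highest_set_bit_position(0) == 0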
| 28 | 0 |