| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1 |
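# transformers-style lazy-import __init__ for the CANINE model: the heavy torch
# modules are only registered in the import structure when torch is available.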
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 |
'''simple docstring'''
from __future__ import annotations
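# Project Euler problem 38 style search: find the largest 1-9 pandigital number
# that can be written as the concatenated product of an integer with (1, 2) or (1, 2, 3).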
def is_9_pandigital(n: int) -> bool:
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    # Four-digit base: concat(n, 2*n) == n * 100002
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    # Three-digit base: concat(n, 2*n, 3*n) == n * 1002003
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
| 42 | 0 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
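# Fine-tuning a timm ResNet on an image-classification task with HuggingFace
# Accelerate (this mirrors Accelerate's cv_example.py training script).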
def extract_label(fname: str) -> str:
    """Extract the class label from a file name like `label_123.jpg`."""
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
| 1 |
'''simple docstring'''
A_ = "Input must be a string of 8 numbers plus letter"
A_ = "TRWAGMYFPDXBNJZSQVHLCKE"
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowerCamelCase_ = f'''Expected string as input, found {type(__UpperCamelCase ).__name__}'''
raise TypeError(__UpperCamelCase )
lowerCamelCase_ = spanish_id.replace('-' ,'' ).upper()
if len(__UpperCamelCase ) != 9:
raise ValueError(__UpperCamelCase )
try:
lowerCamelCase_ = int(spanish_id_clean[0:8] )
lowerCamelCase_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__UpperCamelCase ) from ex
if letter.isdigit():
raise ValueError(__UpperCamelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
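# Processor for LayoutLMv3: combines a LayoutLMv3 image processor (which can
# optionally run OCR) with a LayoutLMv3 tokenizer behind a single callable.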
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images
        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )
        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 2 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        text_1 = tokenizer.decode(tokens_1)
        text_2 = tokenizer.decode(tokens_2)
        text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(text_1, expected_text)
        self.assertEqual(text_2, expected_text)
        self.assertEqual(text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)
        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        pass

    def test_padding_different_model_input_name(self):
        pass
| 42 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
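# Fast (tiny-model) and slow (GPU integration) tests for Stable Diffusion
# img2img-style pipelines from diffusers.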
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@slow
@require_torch_gpu
class StableDiffusionIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_default(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 3 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
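# Custom Stable Diffusion pipeline that copies a fixed-size reference latent
# into the target latents, so the same seed yields similar images across sizes.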
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents.to(self.device)
            latents = latents.to(self.device)
        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, has_nsfw_concept)
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| 42 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 4 |
'''simple docstring'''
import pprint
import requests
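# Small client for the zenquotes.io REST API (quote of the day / random quotes).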
A_ = "https://zenquotes.io/api"
def _UpperCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def _UpperCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
A_ = random_quotes()
pprint.pprint(response)
| 42 | 0 |
'''simple docstring'''
def combination_util(arr, n, r, index, data, i):
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
| 5 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
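# Model-level tests for Meta's ESM-2 protein language model (mirrors the
# structure of the transformers ESM test module).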
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=33 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ) -> int:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = EsmModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = EsmForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = EsmForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.prepare_config_and_inputs()
(
(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,
) = config_and_inputs
lowerCamelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = ()
SCREAMING_SNAKE_CASE_ = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = EsmModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase_ = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = EsmModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase_ = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase_ = create_position_ids_from_input_ids(SCREAMING_SNAKE_CASE_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.empty(2 , 4 , 30 )
lowerCamelCase_ = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase_ = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase_ = embeddings.create_position_ids_from_inputs_embeds(SCREAMING_SNAKE_CASE_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
with torch.no_grad():
lowerCamelCase_ = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCamelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
lowerCamelCase_ = 33
lowerCamelCase_ = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@slow
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
lowerCamelCase_ = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCamelCase_ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
# compare the actual values for a slice.
lowerCamelCase_ = torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
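# Editor's sketch (assumption: illustrative addition, not part of the test file
# above): the padding-aware position ids asserted in the two embedding tests can
# be built with a cumulative sum over the non-padding mask, mirroring what the
# RoBERTa-style helper in the modeling file does. Minimal re-implementation:
import torch


def position_ids_sketch(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    # non-pad tokens count up from padding_idx + 1; pad positions stay at padding_idx
    mask = input_ids.ne(padding_idx).int()
    return (torch.cumsum(mask, dim=1) * mask).long() + padding_idx


# position_ids_sketch(torch.as_tensor([[12, 31, 13, 1]]), padding_idx=1)
# -> tensor([[2, 3, 4, 1]]), matching the expected_positions asserted above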
| 42 | 0 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase_ :
def __init__( self :Optional[Any] , __A :List[Any] , __A :int=13 , __A :Tuple=3 , __A :Any=True , __A :Any=True , __A :Optional[Any]=0.1 , __A :List[Any]=0.1 , __A :Dict=224 , __A :Optional[Any]=1000 , __A :List[Any]=[3, 3, 6, 4] , __A :List[Any]=[48, 56, 112, 220] , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = layer_depths
SCREAMING_SNAKE_CASE__ = embed_dims
def _snake_case ( self :Dict ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, pixel_values, labels
def _snake_case ( self :Union[str, Any] ) -> int:
"""simple docstring"""
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__A , layer_scale_init_value=1E-5 , )
def _snake_case ( self :Tuple , __A :Dict , __A :str , __A :List[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = SwiftFormerModel(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _snake_case ( self :Tuple , __A :str , __A :List[str] , __A :List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = SwiftFormerForImageClassification(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE__ = SwiftFormerForImageClassification(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self :int ) -> str:
"""simple docstring"""
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase_ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCamelCase_ = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def _snake_case ( self :Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(
self , config_class=__A , has_text_modality=__A , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _snake_case ( self :Dict ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _snake_case ( self :Tuple ) -> Union[str, Any]:
"""simple docstring"""
pass
def _snake_case ( self :Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__A )
SCREAMING_SNAKE_CASE__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A , nn.Linear ) )
def _snake_case ( self :List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__A )
SCREAMING_SNAKE_CASE__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __A )
def _snake_case ( self :List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _snake_case ( self :List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def _snake_case ( self :Dict ) -> str:
"""simple docstring"""
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = SwiftFormerModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _snake_case ( self :Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def _snake_case ( self :Union[str, Any] ) -> List[Any]:
"""simple docstring"""
def check_hidden_states_output(__A :List[Any] , __A :str , __A :List[str] ):
SCREAMING_SNAKE_CASE__ = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**self._prepare_for_class(__A , __A ) )
SCREAMING_SNAKE_CASE__ = outputs.hidden_states
SCREAMING_SNAKE_CASE__ = 8
self.assertEqual(len(__A ) , __A ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__A ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ = True
check_hidden_states_output(__A , __A , __A )
def _snake_case ( self :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
def _config_zero_init(__A :Optional[int] ):
SCREAMING_SNAKE_CASE__ = copy.deepcopy(__A )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__A , __A , 1E-10 )
if isinstance(getattr(__A , __A , __A ) , __A ):
SCREAMING_SNAKE_CASE__ = _config_zero_init(getattr(__A , __A ) )
setattr(__A , __A , __A )
return configs_no_init
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = _config_zero_init(__A )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(config=__A )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _snake_case ( self :str ) -> str:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def _snake_case ( self :Optional[int] ) -> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def _snake_case ( self :List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(__A )
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=__A , return_tensors="""pt""" ).to(__A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__A )
# verify the logits
SCREAMING_SNAKE_CASE__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __A )
SCREAMING_SNAKE_CASE__ = torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1E-4 ) )
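# Editor's sketch (assumption: illustrative addition): the hidden-state check
# above expects 8 feature maps whose spatial side halves after every two blocks.
# For the tester defaults (batch_size=13, image_size=224, embed_dims=[48, 56, 112, 220])
# the expected shapes can be enumerated directly:
def expected_swiftformer_shapes(batch_size=13, image_size=224, embed_dims=(48, 56, 112, 220)):
    return [
        (batch_size, embed_dims[i // 2], (image_size // 4) // 2 ** (i // 2), (image_size // 4) // 2 ** (i // 2))
        for i in range(8)
    ]


# expected_swiftformer_shapes()[:4]
# -> [(13, 48, 56, 56), (13, 48, 56, 56), (13, 56, 28, 28), (13, 56, 28, 28)]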
| 6 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
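# Editor's sketch (assumption: illustrative consumer code, not part of this
# __init__): with _LazyModule the package import is cheap and framework-specific
# submodules load only on first attribute access; backends that failed the
# try/except probes above never enter _import_structure, so touching their
# symbols fails with a clear error instead of a deep ImportError.
#
# import importlib
# resnet = importlib.import_module("transformers.models.resnet")  # fast, no torch yet
# model_cls = resnet.ResNetModel  # attribute access triggers the torch-gated import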
| 42 | 0 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def _snake_case ( _snake_case : List[Any] , _snake_case : List[str] , _snake_case : str ) -> Optional[Any]:
'''simple docstring'''
_A = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'ะะฐัะธะฝะฝะพะต ะพะฑััะตะฝะธะต - ััะพ ะทะดะพัะพะฒะพ, ะฝะต ัะฐะบ ะปะธ?',
'de': 'Maschinelles Lernen ist groรartig, oder?',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_A = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
_A = F'''{src_lang}-{tgt_lang}'''
_A = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle inputs with repeated sub-phrases well: [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
os.makedirs(_snake_case , exist_ok=_snake_case )
_A = os.path.join(_snake_case , 'README.md' )
print(F'''Generating {path}''' )
with open(_snake_case , 'w' , encoding='utf-8' ) as f:
f.write(_snake_case )
# make sure we are under the root of the project
a = Path(__file__).resolve().parent.parent.parent
a = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a , a , a = model_name.split('''-''')
a = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 7 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'xlm-prophetnet'
SCREAMING_SNAKE_CASE_ = ['past_key_values']
SCREAMING_SNAKE_CASE_ = {
'num_attention_heads': 'num_encoder_attention_heads',
}
def __init__( self , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = "gelu" , SCREAMING_SNAKE_CASE_ = 30522 , SCREAMING_SNAKE_CASE_ = 1024 , SCREAMING_SNAKE_CASE_ = 4096 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 4096 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 128 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 2 , **SCREAMING_SNAKE_CASE_ , ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = encoder_ffn_dim
lowerCamelCase_ = num_encoder_layers
lowerCamelCase_ = num_encoder_attention_heads
lowerCamelCase_ = decoder_ffn_dim
lowerCamelCase_ = num_decoder_layers
lowerCamelCase_ = num_decoder_attention_heads
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = init_std # Normal(0, this parameter)
lowerCamelCase_ = activation_function
# parameters for xlmprophetnet
lowerCamelCase_ = ngram
lowerCamelCase_ = num_buckets
lowerCamelCase_ = relative_max_distance
lowerCamelCase_ = disable_ngram_loss
lowerCamelCase_ = eps
# 3 Types of Dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = dropout
lowerCamelCase_ = use_cache
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , add_cross_attention=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
@property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
' `num_decoder_layers`.' )
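# Editor's sketch (assumption: illustrative usage with the upstream class name
# XLMProphetNetConfig rather than the obfuscated one above): num_hidden_layers is
# derived from the encoder and decoder depths and is deliberately read-only,
# since the setter above always raises.
from transformers import XLMProphetNetConfig

cfg = XLMProphetNetConfig(num_encoder_layers=12, num_decoder_layers=12)
assert cfg.num_hidden_layers == 24
try:
    cfg.num_hidden_layers = 6
except NotImplementedError:
    pass  # set num_encoder_layers / num_decoder_layers individually instead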
| 42 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = ['''image_processor''', '''tokenizer''']
lowerCAmelCase = '''LayoutLMv2ImageProcessor'''
lowerCAmelCase = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
__A : str = kwargs.pop('feature_extractor')
__A : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(_UpperCAmelCase , _UpperCAmelCase)
def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = 0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = True , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.')
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.')
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.')
# first, apply the image processor
__A : Dict = self.image_processor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase)
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : Optional[int] = [text] # add batch dimension (as the image processor always adds a batch dimension)
__A : Dict = features['words']
__A : Any = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
# add pixel values
__A : Dict = features.pop('pixel_values')
if return_overflowing_tokens is True:
__A : str = self.get_overflowing_images(_UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'])
__A : str = images
return encoded_inputs
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx])
if len(_UpperCAmelCase) != len(_UpperCAmelCase):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
F' {len(_UpperCAmelCase)} and {len(_UpperCAmelCase)}')
return images_with_overflow
def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor
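# Editor's sketch (assumption: illustrative usage with the upstream class name
# LayoutXLMProcessor and a public checkpoint): with apply_ocr enabled, a single
# call runs OCR via the image processor and then tokenizes the recognized words
# and boxes, returning input_ids, bbox, attention_mask and image in one encoding.
#
# from PIL import Image
# from transformers import LayoutXLMProcessor
#
# processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
# encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
# encoding.keys()  # includes "input_ids", "bbox", "attention_mask", "image"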
| 8 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> float:
lowerCamelCase_ = x
lowerCamelCase_ = y
for step in range(__UpperCamelCase ): # noqa: B007
lowerCamelCase_ = a * a - b * b + x
lowerCamelCase_ = 2 * a * b + y
lowerCamelCase_ = a_new
        # divergence happens once the squared absolute value exceeds 4,
        # i.e. for every complex number z with |z| > 2
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _UpperCamelCase ( __UpperCamelCase ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (2_55, 2_55, 2_55)
def _UpperCamelCase ( __UpperCamelCase ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(__UpperCamelCase ,1 ,1 ) )
def _UpperCamelCase ( __UpperCamelCase = 8_00 ,__UpperCamelCase = 6_00 ,__UpperCamelCase = -0.6 ,__UpperCamelCase = 0 ,__UpperCamelCase = 3.2 ,__UpperCamelCase = 50 ,__UpperCamelCase = True ,) -> Image.Image:
lowerCamelCase_ = Image.new('RGB' ,(image_width, image_height) )
lowerCamelCase_ = img.load()
# loop through the image-coordinates
for image_x in range(__UpperCamelCase ):
for image_y in range(__UpperCamelCase ):
# determine the figure-coordinates based on the image-coordinates
lowerCamelCase_ = figure_width / image_width * image_height
lowerCamelCase_ = figure_center_x + (image_x / image_width - 0.5) * figure_width
lowerCamelCase_ = figure_center_y + (image_y / image_height - 0.5) * figure_height
lowerCamelCase_ = get_distance(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
lowerCamelCase_ = get_color_coded_rgb(__UpperCamelCase )
else:
lowerCamelCase_ = get_black_and_white_rgb(__UpperCamelCase )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
A_ = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
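# Editor's note (assumption: added worked example, using the original function
# name get_distance referenced in get_image above): the escape-time loop returns
# the fraction of max_step iterations survived before |z| exceeds 2.
#
# get_distance(0.0, 0.0, 50)  # -> 1.0, the orbit of the origin never diverges
# get_distance(1.0, 1.0, 50)  # -> 0.0, |z|^2 = 10 > 4 already after one step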
| 42 | 0 |
from math import ceil
def A ( __UpperCamelCase = 1_001 ) -> int:
A__ = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
A__ = 2 * i + 1
A__ = 2 * i
A__ = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
SCREAMING_SNAKE_CASE__ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
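# Editor's check (assumption: added example, using the original name `solution`
# as printed above): for the 5x5 spiral from the problem statement the diagonal
# sum is 101, which the closed-form loop reproduces:
# i=1: total = 1 + 4 * 3**2 - 6 * 2 = 25; i=2: total = 25 + 4 * 5**2 - 6 * 4 = 101
# assert solution(5) == 101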
| 9 |
'''simple docstring'''
from math import isclose, sqrt
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> tuple[float, float, float]:
lowerCamelCase_ = point_y / 4 / point_x
lowerCamelCase_ = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
lowerCamelCase_ = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
lowerCamelCase_ = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
lowerCamelCase_ = outgoing_gradient**2 + 4
lowerCamelCase_ = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
lowerCamelCase_ = (point_y - outgoing_gradient * point_x) ** 2 - 1_00
lowerCamelCase_ = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
lowerCamelCase_ = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
lowerCamelCase_ = x_minus if isclose(__UpperCamelCase ,__UpperCamelCase ) else x_plus
lowerCamelCase_ = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def _UpperCamelCase ( __UpperCamelCase = 1.4 ,__UpperCamelCase = -9.6 ) -> int:
lowerCamelCase_ = 0
lowerCamelCase_ = first_x_coord
lowerCamelCase_ = first_y_coord
lowerCamelCase_ = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = next_point(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
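# Editor's note (assumption: added commentary, using the original function name
# next_point): for the ellipse 4x^2 + y^2 = 100 the normal at (x, y) has gradient
# n = y / (4x). Reflecting an incoming gradient m about that normal uses the tan
# double-angle identities s = 2n / (1 + n^2) and c = (1 - n^2) / (1 + n^2), giving
# outgoing = (s - c * m) / (c + s * m), exactly the expression in next_point above.
#
# nx, ny, _ = next_point(1.4, -9.6, (10.1 - -9.6) / (0.0 - 1.4))
# abs(4 * nx**2 + ny**2 - 100) < 1e-6  # the next impact point stays on the ellipse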
| 42 | 0 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
_lowerCAmelCase, _lowerCAmelCase = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
_lowerCAmelCase = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
_lowerCAmelCase = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
_lowerCAmelCase = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'pip install -r transformers/examples/{example_dir}/requirements.txt'])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16,
#                  stream_logs=True)
| 10 |
'''simple docstring'''
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase = False ) -> bool:
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_31_70_44_06_46_79_88_73_85_96_19_81 and not allow_probable:
raise ValueError(
'Warning: upper bound of deterministic test is exceeded. '
'Pass allow_probable=True to allow probabilistic test. '
'A return value of True indicates a probable prime.' )
# array bounds provided by analysis
lowerCamelCase_ = [
20_47,
1_37_36_53,
25_32_60_01,
32_15_03_17_51,
2_15_23_02_89_87_47,
3_47_47_49_66_03_83,
3_41_55_00_71_72_83_21,
1,
3_82_51_23_05_65_46_41_30_51,
1,
1,
31_86_65_85_78_34_03_11_51_16_74_61,
3_31_70_44_06_46_79_88_73_85_96_19_81,
]
lowerCamelCase_ = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(__UpperCamelCase ,1 ):
if n < _p:
# then we have our last prime to check
lowerCamelCase_ = primes[:idx]
break
lowerCamelCase_ ,lowerCamelCase_ = n - 1, 0
    # break up n - 1 into a power of 2 (s) and a
    # remaining odd component d;
    # essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
lowerCamelCase_ = False
for r in range(__UpperCamelCase ):
lowerCamelCase_ = pow(__UpperCamelCase ,d * 2**r ,__UpperCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowerCamelCase_ = True
# this loop will not determine compositeness
break
if pr:
continue
        # if pr is False, then the above loop never evaluated to true,
        # and n MUST be composite
return False
return True
def _UpperCamelCase ( ) -> None:
assert not miller_rabin(5_61 )
assert miller_rabin(5_63 )
# 2047
assert not miller_rabin(83_82_01 )
assert miller_rabin(83_82_07 )
# 1_373_653
assert not miller_rabin(17_31_60_01 )
assert miller_rabin(17_31_60_17 )
# 25_326_001
assert not miller_rabin(30_78_38_66_41 )
assert miller_rabin(30_78_38_66_53 )
# 3_215_031_751
assert not miller_rabin(1_71_30_45_57_48_01 )
assert miller_rabin(1_71_30_45_57_48_19 )
# 2_152_302_898_747
assert not miller_rabin(2_77_97_99_72_83_07 )
assert miller_rabin(2_77_97_99_72_83_27 )
# 3_474_749_660_383
assert not miller_rabin(1_13_85_00_23_90_94_41 )
assert miller_rabin(1_13_85_00_23_90_95_27 )
# 341_550_071_728_321
assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 )
assert miller_rabin(1_27_50_41_01_88_48_80_43_91 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 )
assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 )
assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
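# Editor's sketch (assumption: added usage example, using the original name
# miller_rabin as called in test_miller_rabin above):
#
# miller_rabin(97)   # -> True, 97 is prime
# miller_rabin(561)  # -> False, 561 = 3 * 11 * 17 is a Carmichael number
# miller_rabin(3_317_044_064_679_887_385_961_981 + 2)  # -> ValueError, above the deterministic bound
# miller_rabin(3_317_044_064_679_887_385_961_981 + 2, allow_probable=True)  # probabilistic verdict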
| 42 | 0 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
lowercase_ = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
lowercase_ = {
"facebook/blenderbot_small-90M": 512,
}
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : Dict = VOCAB_FILES_NAMES
__lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : int = BlenderbotSmallTokenizer
def __init__(self , A=None , A=None , A="<|endoftext|>" , A="<|endoftext|>" , A="<|endoftext|>" , A=False , A=True , **A , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
ByteLevelBPETokenizer(
vocab=A , merges=A , add_prefix_space=A , trim_offsets=A , ) , bos_token=A , eos_token=A , unk_token=A , **A , )
_a = add_prefix_space
def a__ (self , A , A=None ) -> List[Any]:
"""simple docstring"""
_a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a__ (self , A , A = None ) -> List[int]:
"""simple docstring"""
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
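# Editor's note (assumption: added commentary describing the two methods above,
# stated with the upstream parameter names token_ids_0 / token_ids_1):
# build_inputs_with_special_tokens frames sequences as
#   single: [bos] + token_ids_0 + [eos]
#   pair:   [bos] + token_ids_0 + [eos] + [eos] + token_ids_1 + [eos]
# and create_token_type_ids_from_sequences returns all zeros, since this model
# does not use token type ids to separate segments.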
| 11 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A_ = 16
A_ = 32
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase = 16 ,__UpperCamelCase = "bert-base-cased" ) -> List[Any]:
lowerCamelCase_ = AutoTokenizer.from_pretrained(__UpperCamelCase )
lowerCamelCase_ = load_dataset('glue' ,'mrpc' )
def tokenize_function(__UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ = tokenizer(examples['sentence1'] ,examples['sentence2'] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCamelCase_ = datasets.map(
__UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=['idx', 'sentence1', 'sentence2'] ,load_from_cache_file=__UpperCamelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ = tokenized_datasets.rename_column('label' ,'labels' )
def collate_fn(__UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCamelCase ,padding='max_length' ,max_length=1_28 ,return_tensors='pt' )
return tokenizer.pad(__UpperCamelCase ,padding='longest' ,return_tensors='pt' )
# Instantiate dataloaders.
lowerCamelCase_ = DataLoader(
tokenized_datasets['train'] ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=__UpperCamelCase )
lowerCamelCase_ = DataLoader(
tokenized_datasets['validation'] ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
model.eval()
lowerCamelCase_ = 0
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ = model(**__UpperCamelCase )
lowerCamelCase_ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowerCamelCase_ ,lowerCamelCase_ = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__UpperCamelCase ) - 1:
lowerCamelCase_ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCamelCase_ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__UpperCamelCase ,references=__UpperCamelCase ,)
lowerCamelCase_ = metric.compute()
return eval_metric["accuracy"]
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> List[str]:
# Initialize accelerator
lowerCamelCase_ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ = config['lr']
lowerCamelCase_ = int(config['num_epochs'] )
lowerCamelCase_ = int(config['seed'] )
lowerCamelCase_ = int(config['batch_size'] )
lowerCamelCase_ = args.model_name_or_path
set_seed(__UpperCamelCase )
lowerCamelCase_ ,lowerCamelCase_ = get_dataloaders(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained(__UpperCamelCase ,return_dict=__UpperCamelCase )
# Instantiate optimizer
lowerCamelCase_ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCamelCase_ = optimizer_cls(params=model.parameters() ,lr=__UpperCamelCase )
if accelerator.state.deepspeed_plugin is not None:
lowerCamelCase_ = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowerCamelCase_ = 1
lowerCamelCase_ = (len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCamelCase_ = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase ,num_warmup_steps=0 ,num_training_steps=__UpperCamelCase ,)
else:
lowerCamelCase_ = DummyScheduler(__UpperCamelCase ,total_num_steps=__UpperCamelCase ,warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = accelerator.prepare(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# We need to keep track of how many total steps we have iterated over
lowerCamelCase_ = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCamelCase_ = 0
lowerCamelCase_ = evaluate.load('glue' ,'mrpc' )
lowerCamelCase_ = num_epochs
if args.partial_train_epoch is not None:
lowerCamelCase_ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCamelCase_ = args.resume_from_checkpoint.split('epoch_' )[1]
lowerCamelCase_ = ''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCamelCase_ = int(__UpperCamelCase ) + 1
lowerCamelCase_ = evaluation_loop(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
accelerator.print('resumed checkpoint performance:' ,__UpperCamelCase )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' ,lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizers\'s lr:' ,optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir ,f'''state_{starting_epoch-1}.json''' ) ,'r' ) as f:
lowerCamelCase_ = json.load(__UpperCamelCase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCamelCase_ = {}
for epoch in range(__UpperCamelCase ,__UpperCamelCase ):
model.train()
for step, batch in enumerate(__UpperCamelCase ):
lowerCamelCase_ = model(**__UpperCamelCase )
lowerCamelCase_ = outputs.loss
lowerCamelCase_ = loss / gradient_accumulation_steps
accelerator.backward(__UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCamelCase_ = f'''epoch_{epoch}'''
lowerCamelCase_ = os.path.join(args.output_dir ,__UpperCamelCase )
accelerator.save_state(__UpperCamelCase )
lowerCamelCase_ = evaluation_loop(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
lowerCamelCase_ = accuracy
lowerCamelCase_ = lr_scheduler.get_lr()[0]
lowerCamelCase_ = optimizer.param_groups[0]['lr']
lowerCamelCase_ = epoch
lowerCamelCase_ = overall_step
accelerator.print(f'''epoch {epoch}:''' ,__UpperCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir ,f'''state_{epoch}.json''' ) ,'w' ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
def _UpperCamelCase ( ) -> str:
lowerCamelCase_ = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' ,type=__UpperCamelCase ,default='bert-base-cased' ,help='Path to pretrained model or model identifier from huggingface.co/models.' ,required=__UpperCamelCase ,)
parser.add_argument(
'--output_dir' ,type=__UpperCamelCase ,default='.' ,help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' ,)
parser.add_argument(
'--resume_from_checkpoint' ,type=__UpperCamelCase ,default=__UpperCamelCase ,help='If the training should continue from a checkpoint folder.' ,)
parser.add_argument(
'--partial_train_epoch' ,type=__UpperCamelCase ,default=__UpperCamelCase ,help='If passed, the training will stop after this number of epochs.' ,)
parser.add_argument(
'--num_epochs' ,type=__UpperCamelCase ,default=2 ,help='Number of train epochs.' ,)
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(__UpperCamelCase ,__UpperCamelCase )
if __name__ == "__main__":
main()
| 42 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class _snake_case ( unittest.TestCase ):
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = tempfile.mkdtemp()
lowercase__ : str = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowercase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
lowercase__ : Optional[Any] = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"""image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
lowercase__ : int = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowercase__ ( self):
'''simple docstring'''
        lowercase__ : Optional[int] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8)]
lowercase__ : str = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1)) for x in image_inputs]
return image_inputs
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Tuple = self.get_tokenizer()
lowercase__ : Optional[int] = self.get_rust_tokenizer()
lowercase__ : Optional[int] = self.get_image_processor()
lowercase__ : Optional[int] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_)
processor_slow.save_pretrained(self.tmpdirname)
lowercase__ : List[str] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_)
processor_fast.save_pretrained(self.tmpdirname)
lowercase__ : Optional[Any] = AlignProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE_)
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE_)
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
lowercase__ : int = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
lowercase__ : Optional[int] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0)
lowercase__ : Dict = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : Dict = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_)
lowercase__ : Any = self.prepare_image_inputs()
lowercase__ : List[str] = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""np""")
lowercase__ : Any = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : List[str] = self.get_tokenizer()
lowercase__ : Optional[int] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = """lower newer"""
lowercase__ : Dict = processor(text=SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=64)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[Any] = self.get_image_processor()
lowercase__ : Any = self.get_tokenizer()
lowercase__ : Tuple = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_)
lowercase__ : Any = """lower newer"""
lowercase__ : Union[str, Any] = self.prepare_image_inputs()
lowercase__ : List[str] = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_)
self.assertListEqual(list(inputs.keys()) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""])
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE_):
processor()
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = self.get_image_processor()
lowercase__ : Any = self.get_tokenizer()
lowercase__ : str = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : int = processor.batch_decode(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[Any] = self.get_image_processor()
lowercase__ : Optional[int] = self.get_tokenizer()
lowercase__ : Tuple = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = """lower newer"""
lowercase__ : Any = self.prepare_image_inputs()
lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 12 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
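# Sanity check (hedged sketch, values not from the original source): an even
# ksize is bumped to the next odd value, so a requested 4x4 kernel comes back 5x5.
# >>> gabor_filter_kernel(4, 8, 0, 10, 0, 0).shape
# (5, 5)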
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
A_ = imread("../image_data/lena.jpg")
# turn image in gray scale value
A_ = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
A_ = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
A_ = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
A_ = out / out.max() * 255
A_ = out.astype(np.uinta)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 42 | 0 |
"""Project Euler problem 145: https://projecteuler.net/problem=145 (reversible numbers)."""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result
def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
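# Sanity check: per the Project Euler 145 statement there are 120 reversible
# numbers below one-thousand, so solution(3) should return 120.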
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
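# Note: with the DummyObject metaclass, merely touching this placeholder class
# (instantiating it or accessing any attribute) raises a helpful error until
# the `transformers`, `torch` and `note_seq` backends are all installed.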
| 42 | 0 |
class Node:
    def __init__(self, data: int, previous=None, next_node=None) -> None:
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous
class LinkedListIterator:
    def __init__(self, head) -> None:
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        value = self.current.get_data()
        self.current = self.current.get_next()
        return value
class LinkedList:
    def __init__(self) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self) -> bool:
        return self.head is None
def create_linked_list() -> None:
    """Builds a small LinkedList to exercise the API above."""
if __name__ == "__main__":
import doctest
doctest.testmod()
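# Usage sketch (hedged, not part of the original doctests):
# >>> ll = LinkedList()
# >>> for i in (1, 2, 3): ll.insert(i)
# >>> str(ll)
# '1 2 3'
# >>> ll.delete_value(2)
# >>> str(ll)
# '1 3'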
| 14 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2,
            block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2,
            block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1,
            layers_per_block=2, block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2,
            block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet
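    # Note: the AutoencoderKL above maps 1-channel 128x64 spectrogram images to a
    # 1-channel 64x32 latent, matching the paired UNet's sample_size.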
    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0]
        )

        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 42 | 0 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )
    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model
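    # Usage sketch (hedged): trainer.calibrate() streams roughly `calib_num`
    # samples through the model with calibration observers enabled, then
    # finish_calibration freezes the collected activation ranges for quantization.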
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}

        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
| 15 |
"""Password generator and strength checker built on the `secrets` module."""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
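# Usage sketch (hedged, non-interactive):
# >>> len(password_generator(12))
# 12
# >>> len(alternative_password_generator("AB", 10))  # includes "AB", total length 10
# 10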
| 42 | 0 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
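# Example (hedged): replace_keys rewrites original SAM checkpoint keys, e.g.
#   "image_encoder.patch_embed.proj.weight" -> "vision_encoder.patch_embed.projection.weight"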
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
    ).to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    assert scores[-1].item() == 0.9712603092193604

    input_boxes = ((75, 275, 1725, 850),)

    inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    assert scores[-1].item() == 0.8686015605926514

    # Test with 2 points and 1 image.
    input_points = [[[400, 650], [800, 650]]]
    input_labels = [[1, 1]]

    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
    ).to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 16 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 42 | 0 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight
def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu
def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
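# Worked example (hedged sketch): greedy selection by value-to-weight ratio.
# >>> foods = build_menu(["apple", "cake"], [50, 10], [2, 5])
# >>> chosen, total = greedy(foods, 5, Things.value_weight)
# >>> total
# 50.0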
def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")

    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
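# Usage note: unet(32) reads the raw diffuser checkpoint for horizon 32 and writes
# hub/hopper-medium-v2/unet/hor32/{diffusion_pytorch_model.bin, config.json}.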
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 42 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
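    # The helper above asserts that a save_config/from_pretrained round-trip
    # leaves scheduler.step() numerically identical (to within 1e-5).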
    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2574) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
| 18 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
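    # Note: MBart appends [eos, src_lang_code] as suffix tokens, which is why the
    # truncated sequence above ends with id 2 (EOS) followed by EN_CODE.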
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
| 42 | 0 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__( self , parent , d_model=16 , batch_size=13 , prediction_length=7 , context_length=14 , label_length=10 , cardinality=19 , embedding_dimension=5 , num_time_features=4 , is_training=True , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , lags_sequence=[1, 2, 3, 4, 5] , moving_average=25 , autocorrelation_factor=5 , ) -> Tuple:
        '''simple docstring'''
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config( self) -> Optional[Any]:
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict( self , config) -> Tuple:
'''simple docstring'''
        _past_length = config.context_length + max(config.lags_sequence)
        static_categorical_features = ids_tensor([self.batch_size, 1] , config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])
        inputs_dict = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
    def prepare_config_and_inputs( self) -> int:
        '''simple docstring'''
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self) -> Tuple:
        '''simple docstring'''
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone( self , config , inputs_dict) -> Union[str, Any]:
        '''simple docstring'''
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)
        transformer_inputs , feature , _ , _ , _ = model.create_network_inputs(**inputs_dict)
        seasonal_input , trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        encoder_last_hidden_state_a = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3)
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1)
            .unsqueeze(1)
            .repeat(1 , config.prediction_length , 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_a = decoder(
            trend=trend_init , inputs_embeds=dec_input , encoder_hidden_states=encoder_last_hidden_state , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self , config_class=AutoformerConfig , has_text_modality=False)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model , info = model_class.from_pretrained(tmpdirname , output_loading_info=True)
            self.assertEqual(info['missing_keys'] , [])
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
@unittest.skip(reason='''Model has no tokens embeddings''')
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
        model_signature = inspect.signature(getattr(AutoformerModel , 'forward'))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name , observed_main_input_name)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''')
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
])
self.assertListEqual(arg_names[: len(__a)] , __a)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester , 'seq_length' , None)
        decoder_seq_length = getattr(self.model_tester , 'decoder_seq_length' , seq_len)
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , seq_len)
        d_model = getattr(self.model_tester , 'd_model' , None)
        num_attention_heads = getattr(self.model_tester , 'num_attention_heads' , None)
        dim = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions) , self.model_tester.num_hidden_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            out_len = len(outputs)
            correct_outlen = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
            self.assertEqual(out_len , correct_outlen)
# decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions , (list, tuple))
            self.assertEqual(len(decoder_attentions) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions , (list, tuple))
            self.assertEqual(len(cross_attentions) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            self.assertEqual(out_len + 2 , len(outputs))
            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def lowerCamelCase__ ( __snake_case="train-batch.pt" ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''', filename=__snake_case, repo_type='''dataset''' )
_UpperCamelCase = torch.load(__snake_case, map_location=__snake_case )
return batch
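# Minimal usage sketch (assumes Hub network access and that the dataset repo
# above exists; the key names follow the batches used in the tests below):
#
#   batch = prepare_batch("val-batch.pt")
#   batch["past_values"].shape   # e.g. torch.Size([64, past_length])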
@require_torch
@slow
class _UpperCAmelCase( unittest.TestCase ):
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
        model = AutoformerModel.from_pretrained('huggingface/autoformer-tourism-monthly').to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , future_values=batch['future_values'] , future_time_features=batch['future_time_features'] , )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size))
        self.assertEqual(output.shape , expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE))
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
        model = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly').to(torch_device)
        batch = prepare_batch('val-batch.pt')
        with torch.no_grad():
            output = model(
                past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape , expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE))
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
        model = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly').to(torch_device)
        batch = prepare_batch('val-batch.pt')
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch['static_categorical_features'] , past_time_features=batch['past_time_features'] , past_values=batch['past_values'] , future_time_features=batch['future_time_features'] , past_observed_mask=batch['past_observed_mask'] , )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape , expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , expected_slice , rtol=1e-1))
| 19 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ) -> Any:
        '''simple docstring'''
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'
    def get_tokenizer( self , **kwargs ) -> int:
        '''simple docstring'''
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ) -> Optional[Any]:
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            'semantic_prompt': np.ones(seq_len ),
            'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
            'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string , voice_preset=voice_preset )
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        file_path = os.path.join(self.tmpdirname , 'file.npz' )
        np.savez(file_path , **voice_preset )
        inputs = processor(text=self.input_string , voice_preset=file_path )
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        encoded_tok = tokenizer(
            self.input_string , padding='max_length' , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 42 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCAmelCase: List[Any] = logging.getLogger(__name__)
def simple_accuracy( preds , labels ):
    return (preds == labels).mean()
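# Quick illustration (not used by the script): `preds == labels` is an
# element-wise boolean array, so its mean is the fraction of correct predictions.
#
#   >>> simple_accuracy(np.array([0, 1, 1, 0]), np.array([0, 1, 0, 0]))
#   0.75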
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
    data_dir: str = field(metadata={'help': 'Should contain the data files for the task.'} )
    max_seq_length: int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed
set_seed(training_args.seed )
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_master():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info(' %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
                results.update(result )
return results
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 20 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
A_ = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
A_ = {
"allenai/longformer-base-4096": 4_096,
"allenai/longformer-large-4096": 4_096,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_096,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_096,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode( ) -> Any:
    bs = (
        list(range(ord('!' ) ,ord('~' ) + 1 ) ) + list(range(ord('¡' ) ,ord('¬' ) + 1 ) ) + list(range(ord('®' ) ,ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs ,cs ) )
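# Illustration (hedged): printable bytes map to themselves, while the remaining
# byte values are shifted into unused unicode code points so every byte has a
# visible, reversible character:
#
#   byte_encoder = bytes_to_unicode()
#   byte_encoder[ord("A")]  # 'A'  (printable bytes are unchanged)
#   byte_encoder[0]         # 'Ā'  (unprintable bytes get remapped to 256+)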
def get_pairs( word ) -> List[str]:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
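# Example (illustrative): for the word tuple ('h', 'e', 'l', 'l', 'o'),
# get_pairs returns {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}.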
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask']
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="replace" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as vocab_handle:
lowerCamelCase_ = json.load(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ = errors # how to handle errors in decoding
lowerCamelCase_ = bytes_to_unicode()
lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as merges_handle:
lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1]
lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowerCamelCase_ = {}
lowerCamelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
    def vocab_size( self ) -> Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
    def get_vocab( self ) -> Optional[Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ) -> Any:
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        self.cache[token] = word
        return word
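    # Sketch of one run (hypothetical ranks): with bpe_ranks
    # {('l', 'o'): 0, ('lo', 'w'): 1}, bpe('low') first merges the best-ranked
    # pair ('l', 'o') giving ('lo', 'w'), then ('lo', 'w') giving ('low',),
    # and returns the space-joined result, caching it per token.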
    def _tokenize( self , text ) -> str:
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(' ' ) )
return bpe_tokens
    def _convert_token_to_id( self , token ) -> Dict:
        '''simple docstring'''
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ) -> Union[str, Any]:
        '''simple docstring'''
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ) -> Tuple:
        '''simple docstring'''
        text = ''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ) -> List[str]:
        '''simple docstring'''
        add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
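# Hedged usage sketch (the class above corresponds to LongformerTokenizer
# upstream; assumes local vocab.json/merges.txt files in the GPT-2 BPE format):
#
#   tok = LongformerTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#   tok.tokenize("Hello world")   # byte-level BPE pieces, e.g. ['Hello', 'Ġworld']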
| 42 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
# General docstring
UpperCAmelCase_ : Optional[int] = "MobileNetV1Config"
# Base docstring
UpperCAmelCase_ : str = "google/mobilenet_v1_1.0_224"
UpperCAmelCase_ : Tuple = [1, 1024, 7, 7]
# Image classification docstring
UpperCAmelCase_ : Optional[Any] = "google/mobilenet_v1_1.0_224"
UpperCAmelCase_ : str = "tabby, tabby cat"
UpperCAmelCase_ : Union[str, Any] = [
"google/mobilenet_v1_1.0_224",
"google/mobilenet_v1_0.75_192",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map( model , config , tf_weights=None ):
    tf_to_pt_map = {}
    if isinstance(model , MobileNetVaForImageClassification ):
        backbone = model.mobilenet_va
    else:
        backbone = model
    prefix = """MobilenetV1/Conv2d_0/"""
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var
    for i in range(13 ):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = F"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = F"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
    if isinstance(model , MobileNetVaForImageClassification ):
        prefix = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias
    return tf_to_pt_map
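# The key strings above follow the TF-slim MobileNetV1 checkpoint layout
# (e.g. "MobilenetV1/Conv2d_3_depthwise/BatchNorm/gamma"), so the loader below
# can walk the checkpoint variable names and copy each array into the matching
# PyTorch parameter.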
def load_tf_weights_in_mobilenet_va( model , config , tf_checkpoint_path ):
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            """Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
            """https://www.tensorflow.org/install/ for installation instructions.""" )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path )
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(F"Loading TF weight {name} with shape {shape}" )
        array = tf.train.load_variable(tf_checkpoint_path , name )
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model , config , tf_weights )
    for name, pointer in tf_to_pt_map.items():
        logger.info(F"Importing {name}" )
        if name not in tf_weights:
            logger.info(F"{name} not in tf pre-trained weights, skipping" )
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info("""Transposing depthwise""" )
            array = np.transpose(array , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info("""Transposing""" )
            if len(pointer.shape ) == 2: # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array , (3, 2, 0, 1) )
        if pointer.shape != array.shape:
            raise ValueError(F"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
        logger.info(F"Initialize PyTorch weight {name} {array.shape}" )
        pointer.data = torch.from_numpy(array )
        tf_weights.pop(name , None )
        tf_weights.pop(name + """/RMSProp""" , None )
        tf_weights.pop(name + """/RMSProp_1""" , None )
        tf_weights.pop(name + """/ExponentialMovingAverage""" , None )
    logger.info(F"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
    return model
def apply_tf_padding( features , conv_layer ):
    in_height , in_width = features.shape[-2:]
    stride_height , stride_width = conv_layer.stride
    kernel_height , kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0 )
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0 )
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , """constant""" , 0.0 )
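# Worked example (illustrative) of the TF "SAME" padding computed above:
# for in_height=7, stride_height=2, kernel_height=3 -> 7 % 2 != 0, so
# pad_along_height = max(3 - (7 % 2), 0) = 2, split as pad_top=1, pad_bottom=1.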
class MobileNetVaConvLayer ( nn.Module ):
def __init__( self :Tuple , __snake_case :MobileNetVaConfig , __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :Optional[int] = 1 , __snake_case :Optional[int] = 1 , __snake_case :bool = False , __snake_case :Optional[bool] = True , __snake_case :Optional[bool or str] = True , ):
'''simple docstring'''
super().__init__()
__magic_name__ : Union[str, Any] =config
if in_channels % groups != 0:
raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups." )
if out_channels % groups != 0:
raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups." )
__magic_name__ : List[Any] =0 if config.tf_padding else int((kernel_size - 1) / 2 )
__magic_name__ : Union[str, Any] =nn.Convad(
in_channels=__snake_case , out_channels=__snake_case , kernel_size=__snake_case , stride=__snake_case , padding=__snake_case , groups=__snake_case , bias=__snake_case , padding_mode="""zeros""" , )
if use_normalization:
__magic_name__ : Any =nn.BatchNormad(
num_features=__snake_case , eps=config.layer_norm_eps , momentum=0.9997 , affine=__snake_case , track_running_stats=__snake_case , )
else:
__magic_name__ : int =None
if use_activation:
if isinstance(__snake_case , __snake_case ):
__magic_name__ : int =ACTaFN[use_activation]
elif isinstance(config.hidden_act , __snake_case ):
__magic_name__ : List[Any] =ACTaFN[config.hidden_act]
else:
__magic_name__ : Tuple =config.hidden_act
else:
__magic_name__ : Any =None
def A__ ( self :Union[str, Any] , __snake_case :torch.Tensor ):
'''simple docstring'''
if self.config.tf_padding:
__magic_name__ : Tuple =apply_tf_padding(__snake_case , self.convolution )
__magic_name__ : int =self.convolution(__snake_case )
if self.normalization is not None:
__magic_name__ : Optional[Any] =self.normalization(__snake_case )
if self.activation is not None:
__magic_name__ : Dict =self.activation(__snake_case )
return features
class MobileNetVaPreTrainedModel ( PreTrainedModel ):
UpperCamelCase = MobileNetVaConfig
UpperCamelCase = load_tf_weights_in_mobilenet_va
UpperCamelCase = """mobilenet_v1"""
UpperCamelCase = """pixel_values"""
UpperCamelCase = False
def A__ ( self :Dict , __snake_case :Union[nn.Linear, nn.Convad] ):
'''simple docstring'''
if isinstance(__snake_case , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__snake_case , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
UpperCAmelCase_ : Optional[Any] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase_ : int = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"""The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , UpperCamelCase__ , )
class MobileNetVaModel ( MobileNetVaPreTrainedModel ):
def __init__( self :Union[str, Any] , __snake_case :MobileNetVaConfig , __snake_case :bool = True ):
'''simple docstring'''
super().__init__(__snake_case )
__magic_name__ : Tuple =config
__magic_name__ : int =32
__magic_name__ : Optional[int] =max(int(depth * config.depth_multiplier ) , config.min_depth )
__magic_name__ : str =MobileNetVaConvLayer(
__snake_case , in_channels=config.num_channels , out_channels=__snake_case , kernel_size=3 , stride=2 , )
__magic_name__ : List[Any] =[1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
__magic_name__ : List[str] =nn.ModuleList()
for i in range(13 ):
__magic_name__ : Optional[int] =out_channels
if strides[i] == 2 or i == 0:
depth *= 2
__magic_name__ : Dict =max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
__snake_case , in_channels=__snake_case , out_channels=__snake_case , kernel_size=3 , stride=strides[i] , groups=__snake_case , ) )
self.layer.append(
MobileNetVaConvLayer(
__snake_case , in_channels=__snake_case , out_channels=__snake_case , kernel_size=1 , ) )
__magic_name__ : Optional[Any] =nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def A__ ( self :Optional[int] , __snake_case :Optional[int] ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A__ ( self :Union[str, Any] , __snake_case :Optional[torch.Tensor] = None , __snake_case :Optional[bool] = None , __snake_case :Optional[bool] = None , ):
'''simple docstring'''
__magic_name__ : str =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__magic_name__ : str =return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
__magic_name__ : Any =self.conv_stem(__snake_case )
__magic_name__ : Optional[int] =() if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
__magic_name__ : Tuple =layer_module(__snake_case )
if output_hidden_states:
__magic_name__ : int =all_hidden_states + (hidden_states,)
__magic_name__ : Optional[int] =hidden_states
if self.pooler is not None:
__magic_name__ : str =torch.flatten(self.pooler(__snake_case ) , start_dim=1 )
else:
__magic_name__ : Tuple =None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__snake_case , pooler_output=__snake_case , hidden_states=__snake_case , )
@add_start_docstrings(
"""
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , UpperCamelCase__ , )
class MobileNetVaForImageClassification ( MobileNetVaPreTrainedModel ):
def __init__( self :int , __snake_case :MobileNetVaConfig ):
'''simple docstring'''
super().__init__(__snake_case )
__magic_name__ : Any =config.num_labels
__magic_name__ : List[Any] =MobileNetVaModel(__snake_case )
__magic_name__ : int =self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__magic_name__ : List[Any] =nn.Dropout(config.classifier_dropout_prob , inplace=__snake_case )
__magic_name__ : Dict =nn.Linear(__snake_case , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A__ ( self :List[str] , __snake_case :Optional[torch.Tensor] = None , __snake_case :Optional[bool] = None , __snake_case :Optional[torch.Tensor] = None , __snake_case :Optional[bool] = None , ):
'''simple docstring'''
__magic_name__ : List[Any] =return_dict if return_dict is not None else self.config.use_return_dict
__magic_name__ : Any =self.mobilenet_va(__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case )
__magic_name__ : Any =outputs.pooler_output if return_dict else outputs[1]
__magic_name__ : List[Any] =self.classifier(self.dropout(__snake_case ) )
__magic_name__ : List[Any] =None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__magic_name__ : Any ="""regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__magic_name__ : Union[str, Any] ="""single_label_classification"""
else:
__magic_name__ : Tuple ="""multi_label_classification"""
if self.config.problem_type == "regression":
__magic_name__ : List[str] =MSELoss()
if self.num_labels == 1:
__magic_name__ : Optional[Any] =loss_fct(logits.squeeze() , labels.squeeze() )
else:
__magic_name__ : int =loss_fct(__snake_case , __snake_case )
elif self.config.problem_type == "single_label_classification":
__magic_name__ : List[str] =CrossEntropyLoss()
__magic_name__ : Optional[Any] =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__magic_name__ : Dict =BCEWithLogitsLoss()
__magic_name__ : Tuple =loss_fct(__snake_case , __snake_case )
if not return_dict:
__magic_name__ : int =(logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=__snake_case , logits=__snake_case , hidden_states=outputs.hidden_states , )
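# Hedged usage sketch (names follow the docstrings above; weights are fetched
# from the Hub, so this needs network access, and `image` stands for any PIL image):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   logits = model(**processor(images=image, return_tensors="pt")).logits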
| 21 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
A_ = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
A_ = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
A_ = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = RealmTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> List[Any]:
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def batch_encode_candidates( self , text , **kwargs ) -> List[Any]:
        '''simple docstring'''
        kwargs['padding'] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop('text_pair' , None )
        return_tensors = kwargs.pop('return_tensors' , None )
        output_data = {
            'input_ids': [],
            'attention_mask': [],
            'token_type_ids': [],
        }
        for idx, candidate_text in enumerate(batch_text ):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text , candidate_text_pair , return_tensors=None , **kwargs )
            encoded_input_ids = encoded_candidates.get('input_ids' )
            encoded_attention_mask = encoded_candidates.get('attention_mask' )
            encoded_token_type_ids = encoded_candidates.get('token_type_ids' )
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids )
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask )
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids )
        output_data = {key: item for key, item in output_data.items() if len(item ) != 0}
        return BatchEncoding(output_data , tensor_type=return_tensors )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> List[Any]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
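# A minimal, self-contained sketch of the [CLS]/[SEP] packing implemented above,
# using illustrative BERT-style ids (CLS=101, SEP=102); the helper name and the
# ids are assumptions for demonstration only.
def _demo_build_inputs(token_ids_a, token_ids_b=None, cls_id=101, sep_id=102):
    # one sequence: [CLS] A [SEP]; a pair: [CLS] A [SEP] B [SEP]
    output = [cls_id] + token_ids_a + [sep_id]
    if token_ids_b:
        output += token_ids_b + [sep_id]
    return output
assert _demo_build_inputs([7, 8]) == [101, 7, 8, 102]
assert _demo_build_inputs([7, 8], [9]) == [101, 7, 8, 102, 9, 102]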
| 42 | 0 |
'''simple docstring'''
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
| 22 |
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(__UpperCamelCase ) -> bool:
    lowerCamelCase_ = str(__UpperCamelCase )
    return len(lowerCamelCase_ ) == 9 and set(lowerCamelCase_ ) == set('123456789' )
def solution() -> int | None:
    for base_num in range(99_99 ,49_99 ,-1 ):
        lowerCamelCase_ = 10_00_02 * base_num
        if is_9_pandigital(lowerCamelCase_ ):
            return lowerCamelCase_
    for base_num in range(3_33 ,99 ,-1 ):
        lowerCamelCase_ = 1_00_20_03 * base_num
        if is_9_pandigital(lowerCamelCase_ ):
            return lowerCamelCase_
    return None
if __name__ == "__main__":
print(f'''{solution() = }''')
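# Quick hand-checkable sanity checks for is_9_pandigital above; the sample values
# are chosen for illustration and follow directly from the definition.
assert is_9_pandigital(918273645)  # the digits 1-9 each appear exactly once
assert not is_9_pandigital(123456780)  # contains a zero
assert not is_9_pandigital(12345678)  # only eight digits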
| 42 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput( BaseOutput ):
    """simple docstring"""
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor
class ScoreSdeVeScheduler( SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    order = 1
@register_to_config
    def __init__( self , num_train_timesteps = 2000 , snr = 0.1_5 , sigma_min = 0.0_1 , sigma_max = 1_3_4_8.0 , sampling_eps = 1e-5 , correct_steps = 1 , ) -> Tuple:
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps , sigma_min , sigma_max , sampling_eps )
    def scale_model_input( self , sample , timestep = None ) -> torch.FloatTensor:
        return sample
    def set_timesteps( self , num_inference_steps , sampling_eps = None , device = None ) -> str:
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1 , sampling_eps , num_inference_steps , device=device )
    def set_sigmas( self , num_inference_steps , sigma_min = None , sigma_max = None , sampling_eps = None ) -> Any:
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps , sampling_eps )
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min ) , math.log(sigma_max ) , num_inference_steps ) )
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
    def get_adjacent_sigma( self , timesteps , t ) -> List[Any]:
        return torch.where(
            timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
    def step_pred( self , model_output , timestep , sample , generator = None , return_dict = True , ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        timestep = timestep * torch.ones(
            sample.shape[0] , device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device )
        sigma = self.discrete_sigmas[timesteps].to(sample.device )
        adjacent_sigma = self.get_adjacent_sigma(timesteps , timestep ).to(sample.device )
        drift = torch.zeros_like(sample )
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            diffusion = diffusion.unsqueeze(-1 )
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape , layout=sample.layout , generator=generator , device=sample.device , dtype=sample.dtype )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample , prev_sample_mean=prev_sample_mean )
    def step_correct( self , model_output , sample , generator = None , return_dict = True , ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape , layout=sample.layout , generator=generator ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            step_size = step_size.unsqueeze(-1 )
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise( self , original_samples , noise , timesteps , ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device )
        sigmas = self.discrete_sigmas.to(original_samples.device )[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples ) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
def __len__( self ) -> Optional[int]:
return self.config.num_train_timesteps
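# A plain-Python illustration of the geometric noise schedule that set_sigmas builds
# above, assuming the defaults sigma_min=0.01, sigma_max=1348.0 and three steps;
# the variable names are illustrative and torch is deliberately avoided here.
_sigma_min, _sigma_max, _eps, _steps = 0.01, 1348.0, 1e-5, 3
_ts = [1.0 - i * (1.0 - _eps) / (_steps - 1) for i in range(_steps)]  # linspace(1, eps, steps)
_sigmas = [_sigma_min * (_sigma_max / _sigma_min) ** t for t in _ts]
assert abs(_sigmas[0] - _sigma_max) < 1e-6 and _sigmas[-1] < _sigmas[0]  # noise decays along the reverse process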
| 23 |
'''simple docstring'''
A_ = "Input must be a string of 8 numbers plus letter"
A_ = "TRWAGMYFPDXBNJZSQVHLCKE"
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowerCamelCase_ = f'''Expected string as input, found {type(__UpperCamelCase ).__name__}'''
raise TypeError(__UpperCamelCase )
lowerCamelCase_ = spanish_id.replace('-' ,'' ).upper()
if len(__UpperCamelCase ) != 9:
raise ValueError(__UpperCamelCase )
try:
lowerCamelCase_ = int(spanish_id_clean[0:8] )
lowerCamelCase_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__UpperCamelCase ) from ex
if letter.isdigit():
raise ValueError(__UpperCamelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
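# Worked example for the validator above: 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == 'Z', so '12345678Z' is a well-formed Spanish ID.
assert is_spain_national_id('12345678Z')
assert is_spain_national_id('12345678-z')  # dashes and case are normalised first
assert not is_spain_national_id('12345678A')  # wrong check letter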
| 42 | 0 |
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma (num : float )-> float:
    '''simple docstring'''
    if num <= 0:
        raise ValueError('''math domain error''' )
    return quad(integrand , 0 , inf , args=(num) )[0]
def integrand (x : float , z : float )-> float:
    '''simple docstring'''
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
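# Usage sketch for the quadrature-based gamma above: for positive integers
# gamma(n) == (n - 1)!, so gamma(5) should be 24 up to integration error.
assert abs(gamma(5) - 24.0) < 1e-4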
| 24 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {'do_clean_text': False, 'add_prefix_space': False}
    def setUp( self ) -> Optional[Any]:
        '''simple docstring'''
        super().setUp()
        # fmt: off
        vocab_tokens = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        emoji_tokens = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.emoji_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        with open(self.emoji_file , 'w' ) as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens ) )
    def get_tokenizer( self , **kwargs ) -> Dict:
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ) -> Union[str, Any]:
        '''simple docstring'''
        input_text = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
        output_text = 'こんにちは、世界。 \nこんばんは、世界。😀'
        return input_text, output_text
    def get_clean_sequence( self , tokenizer ) -> int:
        '''simple docstring'''
        input_text ,output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> int:
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = 'こんにちは、世界。 こんばんは、㔺界。'
        expected_token = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , expected_token )
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(input_ids , expected_ids )
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens )
        self.assertListEqual(input_ids , expected_ids )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
        expected_text = 'こんにちは、、、、世界。こんばんは、、、、世界。'
        ids = tokenizer.encode(input_text )
        output_text = tokenizer.decode(ids )
        self.assertEqual(output_text , expected_text )
@slow
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        # Testing tokenization
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'
        expected_text = 'こんにちは、世界。こんばんは、世界。😀'
        x_token_1 = tokenizer.encode(prefix_text + input_text )
        x_token_2 = tokenizer.encode('' , prefix_text=prefix_text + input_text )
        x_token_3 = tokenizer.encode(input_text , prefix_text=prefix_text )
        x_text_1 = tokenizer.decode(x_token_1 )
        x_text_2 = tokenizer.decode(x_token_2 )
        x_text_3 = tokenizer.decode(x_token_3 )
        self.assertEqual(x_text_1 , expected_text )
        self.assertEqual(x_text_2 , expected_text )
        self.assertEqual(x_text_3 , expected_text )
@slow
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        # Testing tokenization
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'
        len_prefix = len(tokenizer.encode(prefix_text ) ) - 2
        len_text = len(tokenizer.encode(input_text ) ) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text ).token_type_ids
        type_id_2 = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
        type_id_3 = tokenizer(input_text , prefix_text=prefix_text ).token_type_ids
        self.assertListEqual(type_id_1 , expected_mask_1 )
        self.assertListEqual(type_id_2 , expected_mask_2 )
        self.assertListEqual(type_id_3 , expected_mask_3 )
@slow
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        x_token_1 = tokenizer.encode('あンいワ' )
        x_token_2 = tokenizer.encode('' , prefix_text='あンいワ' )
        x_token_3 = tokenizer.encode('いワ' , prefix_text='あン' )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_2 ) )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_3 ) )
        self.assertNotEqual(x_token_1 , x_token_2 )
        self.assertNotEqual(x_token_1 , x_token_3 )
        self.assertEqual(x_token_2[1] , x_token_2[-1] ) # SEG token
        self.assertEqual(x_token_2[1] , x_token_3[3] ) # SEG token
@slow
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        input_pairs = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
        x_token = tokenizer(input_pairs , padding=True )
        x_token_2 = tokenizer.batch_encode_plus(input_pairs , padding=True )
        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmasks = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , expected_outputs )
        self.assertListEqual(x_token.token_type_ids , expected_typeids )
        self.assertListEqual(x_token.attention_mask , expected_attmasks )
        self.assertListEqual(x_token_2.input_ids , expected_outputs )
        self.assertListEqual(x_token_2.token_type_ids , expected_typeids )
        self.assertListEqual(x_token_2.attention_mask , expected_attmasks )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
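# Illustrative sketch of the prefix-LM token_type_ids pattern checked by the tests
# above (mirroring expected_mask_3): a leading 1, then 1 for every prefix token,
# then 0 for the generated text; the helper name is an assumption for demonstration.
def _demo_prefix_token_type_ids(len_prefix, len_text):
    return [1] + [1] * len_prefix + [0] * (len_text + 1)
assert _demo_prefix_token_type_ids(2, 3) == [1, 1, 1, 0, 0, 0, 0]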
| 42 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    '''simple docstring'''
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
    @classmethod
    def create( cls : Tuple , common : CommonSchedulerState , init_noise_sigma : jnp.ndarray , timesteps : jnp.ndarray ) -> Dict:
        """simple docstring"""
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput( FlaxSchedulerOutput ):
    '''simple docstring'''
    state: DDPMSchedulerState
class FlaxDDPMScheduler( FlaxSchedulerMixin , ConfigMixin ):
    '''simple docstring'''
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
    @property
    def has_state( self : Optional[Any] ) -> Optional[Any]:
        """simple docstring"""
        return True
@register_to_config
    def __init__( self : str , num_train_timesteps : int = 1000 , beta_start : float = 0.0001 , beta_end : float = 0.02 , beta_schedule : str = "linear" , trained_betas : Optional[jnp.ndarray] = None , variance_type : str = "fixed_small" , clip_sample : bool = True , prediction_type : str = "epsilon" , dtype : jnp.dtype = jnp.float32 , ) -> Optional[int]:
        """simple docstring"""
        self.dtype = dtype
    def create_state( self : Any , common : Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState:
        """simple docstring"""
        if common is None:
            common = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )
        timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )
    def scale_model_input( self : List[Any] , state : DDPMSchedulerState , sample : jnp.ndarray , timestep : Optional[int] = None ) -> jnp.ndarray:
        """simple docstring"""
        return sample
    def set_timesteps( self : Optional[int] , state : DDPMSchedulerState , num_inference_steps : int , shape : Tuple = () ) -> DDPMSchedulerState:
        """simple docstring"""
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps , timesteps=timesteps , )
    def _get_variance( self : Optional[Any] , state : DDPMSchedulerState , t : int , predicted_variance : Any=None , variance_type : Any=None ) -> Tuple:
        """simple docstring"""
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance , a_min=1e-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance , a_min=1e-20 ) )
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step( self : Any , state : DDPMSchedulerState , model_output : jnp.ndarray , timestep : int , sample : jnp.ndarray , key : Optional[jax.random.KeyArray] = None , return_dict : bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        """simple docstring"""
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output ,predicted_variance = jnp.split(model_output , sample.shape[1] , axis=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " or `v_prediction` for the FlaxDDPMScheduler." )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key , num=1 )
            noise = jax.random.normal(split_key , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(state , t , predicted_variance=predicted_variance ) ** 0.5) * noise
        variance = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state )
    def add_noise( self : Union[str, Any] , state : DDPMSchedulerState , original_samples : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray , ) -> jnp.ndarray:
        """simple docstring"""
        return add_noise_common(state.common , original_samples , noise , timesteps )
    def get_velocity( self : str , state : DDPMSchedulerState , sample : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray , ) -> jnp.ndarray:
        """simple docstring"""
        return get_velocity_common(state.common , sample , noise , timesteps )
def __len__( self : int ) -> Any:
"""simple docstring"""
return self.config.num_train_timesteps
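# A scalar walk-through of the epsilon -> x0 reconstruction used in `step` above
# (formula (15) of https://arxiv.org/pdf/2006.11239.pdf), with toy numbers chosen
# purely for illustration.
_alpha_prod_t, _sample, _eps_pred = 0.25, 1.0, 0.5
_x0 = (_sample - (1 - _alpha_prod_t) ** 0.5 * _eps_pred) / _alpha_prod_t ** 0.5
# alpha_prod_t ** 0.5 == 0.5 here, so x0 is twice the noise-corrected sample
assert abs(_x0 - 2 * (_sample - 0.75 ** 0.5 * _eps_pred)) < 1e-12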
| 25 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
A_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline( DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ) -> Dict:
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ = "auto" ) -> List[str]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
    def __call__( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , text_embeddings = None , **kwargs , ) -> List[str]:
        '''simple docstring'''
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(prompt )}''' )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(callback_steps )}.''' )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed ,seq_len ,_ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1 )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = ['']
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    f'''`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !='''
                    f''' {type(prompt )}.''' )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    f'''`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:'''
                    f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
                    ' the batch size of `prompt`.' )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens , padding='max_length' , max_length=max_length , truncation=True , return_tensors='pt' , )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size , num_images_per_prompt , 1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference , generator=generator , device='cpu' , dtype=latents_dtype ).to(self.device )
                latents = torch.randn(latents_shape , generator=generator , device='cpu' , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference , generator=generator , device=self.device , dtype=latents_dtype )
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            latents_reference = latents_reference.to(self.device )
            latents = latents.to(self.device )
        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx , 0 )
        dy = max(-dy , 0 )
        # import pdb
        # pdb.set_trace()
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond ,noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , latents )
        latents = 1 / 0.18_215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image ) , return_tensors='pt' ).to(
                self.device )
            image ,has_nsfw_concept = self.safety_checker(
                images=image , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            has_nsfw_concept = None
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, has_nsfw_concept)
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=has_nsfw_concept )
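# Classifier-free guidance in isolation: the loop above combines the unconditional
# and text-conditioned noise predictions as uncond + scale * (text - uncond), so a
# guidance_scale of 1.0 must reproduce the text-conditioned prediction exactly.
# The helper name and toy values are illustrative only.
def _demo_cfg(noise_uncond, noise_text, guidance_scale):
    return noise_uncond + guidance_scale * (noise_text - noise_uncond)
assert _demo_cfg(1.0, 3.0, 1.0) == 3.0   # scale 1 keeps the text-conditioned branch
assert _demo_cfg(1.0, 3.0, 0.0) == 1.0   # scale 0 falls back to the unconditional branch
assert _demo_cfg(1.0, 3.0, 7.5) == 16.0  # larger scales extrapolate past the text branch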
| 42 | 0 |
'''simple docstring'''
def create_ngram ( sentence , ngram_size ) -> list[str]:
    """simple docstring"""
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
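# Usage example for the character n-gram helper above.
assert create_ngram('abcd', 2) == ['ab', 'bc', 'cd']
assert create_ngram('abc', 3) == ['abc']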
| 26 |
'''simple docstring'''
import pprint
import requests
A_ = "https://zenquotes.io/api"
def _UpperCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def _UpperCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
A_ = random_quotes()
pprint.pprint(response)
| 42 | 0 |
from math import factorial
class Dual:
    '''simple docstring'''
    def __init__( self , real , rank ):
        self.real = real
        if isinstance(rank , int ):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__( self ):
        return (
            F"{self.real}+"
            F"{'+'.join(str(dual )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"
        )
    def reduce( self ):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1 )
        return Dual(self.real , cur )
    def __add__( self , other ):
        if not isinstance(other , Dual ):
            return Dual(self.real + other , self.duals )
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual ) > len(o_dual ):
            o_dual.extend([1] * (len(s_dual ) - len(o_dual )) )
        elif len(s_dual ) < len(o_dual ):
            s_dual.extend([1] * (len(o_dual ) - len(s_dual )) )
        new_duals = []
        for i in range(len(s_dual ) ):
            new_duals.append(s_dual[i] + o_dual[i] )
        return Dual(self.real + other.real , new_duals )
    __radd__ = __add__
    def __sub__( self , other ):
        return self + other * -1
    def __mul__( self , other ):
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other )
            return Dual(self.real * other , new_duals )
        new_duals = [0] * (len(self.duals ) + len(other.duals ) + 1)
        for i, item in enumerate(self.duals ):
            for j, jtem in enumerate(other.duals ):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals ) ):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals ) ):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real , new_duals )
    __rmul__ = __mul__
    def __truediv__( self , other ):
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other )
            return Dual(self.real / other , new_duals )
        raise ValueError
    def __floordiv__( self , other ):
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other )
            return Dual(self.real // other , new_duals )
        raise ValueError
    def __pow__( self , n ):
        if n < 0 or isinstance(n , float ):
            raise ValueError('power must be a positive integer' )
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1 ):
            x *= self
        return x
def differentiate( func , position , order ) -> str:
    """simple docstring"""
    if not callable(func ):
        raise ValueError('differentiate() requires a function as input for func' )
    if not isinstance(position , (float, int) ):
        raise ValueError('differentiate() requires a float as input for position' )
    if not isinstance(order , int ):
        raise ValueError('differentiate() requires an int as input for order' )
    d = Dual(position , 1 )
    result = func(d )
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order )
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f(y ) -> Dict:
        """simple docstring"""
        return y**2 * y**4
print(differentiate(f, 9, 2))
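# Further worked examples for the dual-number differentiation above: the printed
# value is the second derivative of y**6 at 9, i.e. 30 * 9**4 == 196830.
assert differentiate(lambda y: y**3, 2, 1) == 12  # d/dy y**3 at y=2
assert differentiate(lambda y: y**2, 3, 0) == 9   # order 0 just evaluates f(3)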
| 27 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=33 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> int:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ) -> str:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ) -> List[str]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[int]:
        '''simple docstring'''
        model = EsmModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Tuple:
        '''simple docstring'''
        model = EsmForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Any:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ) -> Any:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    test_mismatched_shapes = False
    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            'feature-extraction': EsmModel,
            'fill-mask': EsmForMaskedLM,
            'text-classification': EsmForSequenceClassification,
            'token-classification': EsmForTokenClassification,
            'zero-shot': EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True
    def setUp( self ) -> List[Any]:
        '''simple docstring'''
        self.model_tester = EsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
    def test_config( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_model( self ) -> Any:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ) -> str:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ) -> Dict:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification( self ) -> int:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ) -> Any:
        '''simple docstring'''
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_create_position_ids_respects_padding_index( self ) -> Any:
        '''simple docstring'''
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config )
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ] )
        position_ids = create_position_ids_from_input_ids(input_ids , model.padding_idx )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(position_ids , expected_positions ) ) )
    def test_create_position_ids_from_inputs_embeds( self ) -> List[Any]:
        '''simple docstring'''
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config )
        inputs_embeds = torch.empty(2 , 4 , 30 )
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions] )
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(position_ids , expected_positions ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
@require_torch
class EsmModelIntegrationTest( TestCasePlus ):
'''simple docstring'''
@slow
    def test_inference_masked_lm( self ) -> Any:
        '''simple docstring'''
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
            output = model(input_ids )[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape , expected_shape )
            expected_slice = torch.tensor(
                [[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_inference_no_head( self ) -> Tuple:
        '''simple docstring'''
        with torch.no_grad():
            model = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
            output = model(input_ids )[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
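# The position-id convention exercised by the tests above, reproduced in pure
# Python for clarity: padding positions keep padding_idx, real tokens count up
# from padding_idx + 1. The helper name is an assumption for demonstration only.
def _demo_create_position_ids(input_ids, padding_idx):
    position_ids, pos = [], 0
    for token in input_ids:
        if token == padding_idx:
            position_ids.append(padding_idx)
        else:
            pos += 1
            position_ids.append(padding_idx + pos)
    return position_ids
assert _demo_create_position_ids([12, 31, 13, 1], padding_idx=1) == [2, 3, 4, 1]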
| 42 | 0 |
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ):
'''simple docstring'''
        token = '[PAD]'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ), token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ), token )
def UpperCamelCase_ ( self ):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '[PAD]' )
self.assertEqual(vocab_keys[1], '[CLS]' )
self.assertEqual(vocab_keys[-1], 'j' )
        self.assertEqual(len(vocab_keys ), 1_012 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1_012 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'รฉ',
'.',
], )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
], )
@cached_property
    def big_tokenizer( self ):
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
        text = 'Hello World!'
        expected_ids = [35_389, 6_672, 49, 2]
        self.assertListEqual(expected_ids, self.big_tokenizer.encode(text ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = {'input_ids': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE, model_name='microsoft/xprophetnet-large-wiki100-cased', revision='1acad1643ddd54a44df6a1b797ada8373685d90e', )
| 28 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 42 | 0 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
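# How these tests are typically exercised (editor's note; the module path below
# is an assumption about where this file lives in the datasets repo):
#
#   pytest tests/packaged_modules/test_spark.py -k "repartition or shard"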
| 29 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30_522,
        hidden_size: Optional[int] = 1_024,
        encoder_ffn_dim: Optional[int] = 4_096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4_096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
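if __name__ == "__main__":
    # minimal usage sketch (editor's addition, not part of the original module):
    # `num_hidden_layers` is derived and read-only, so depth is configured via
    # the encoder/decoder attributes instead
    config = XLMProphetNetConfig(num_encoder_layers=6, num_decoder_layers=6)
    assert config.num_hidden_layers == 12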
| 42 | 0 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 30 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
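    # editor's addition, in the same uncomment-to-use style as the options
    # above: a small, cheap render of the zoomed-in section
    # thumbnail = get_image(image_width=200, image_height=150,
    #                       figure_center_x=-0.6, figure_center_y=-0.4,
    #                       figure_width=0.8)
    # thumbnail.save("mandelbrot_zoom_small.png")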
| 42 | 0 |
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
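    # quick roundtrip check (editor's addition): the classic 3-rail example
    plaintext = "WEAREDISCOVEREDFLEEATONCE"
    ciphertext = encrypt(plaintext, 3)
    assert ciphertext == "WECRLTEERDSOEEFEAOCAIVDEN"
    assert decrypt(ciphertext, 3) == plaintext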
| 31 |
'''simple docstring'''
from math import isclose, sqrt
def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    # normal_gradient = gradient of the line normal to the ellipse at this point
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
| 42 | 0 |
def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
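    # worked examples (editor's addition): 39 -> 27 -> 14 -> 4 takes three
    # multiplicative steps; 199 -> 19 -> 10 -> 1 takes three additive steps
    assert multiplicative_persistence(39) == 3
    assert additive_persistence(199) == 3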
| 32 |
'''simple docstring'''
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n -1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True


def test_miller_rabin() -> None:
assert not miller_rabin(5_61 )
assert miller_rabin(5_63 )
# 2047
assert not miller_rabin(83_82_01 )
assert miller_rabin(83_82_07 )
# 1_373_653
assert not miller_rabin(17_31_60_01 )
assert miller_rabin(17_31_60_17 )
# 25_326_001
assert not miller_rabin(30_78_38_66_41 )
assert miller_rabin(30_78_38_66_53 )
# 3_215_031_751
assert not miller_rabin(1_71_30_45_57_48_01 )
assert miller_rabin(1_71_30_45_57_48_19 )
# 2_152_302_898_747
assert not miller_rabin(2_77_97_99_72_83_07 )
assert miller_rabin(2_77_97_99_72_83_27 )
# 3_474_749_660_383
assert not miller_rabin(1_13_85_00_23_90_94_41 )
assert miller_rabin(1_13_85_00_23_90_95_27 )
# 341_550_071_728_321
assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 )
assert miller_rabin(1_27_50_41_01_88_48_80_43_91 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 )
assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 )
assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
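    # extra spot check (editor's addition): the Mersenne prime 2**61 - 1 sits
    # well inside the deterministic bound, so no allow_probable flag is needed
    assert miller_rabin(2**61 - 1)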
| 42 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 33 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
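# How this script is usually driven (editor's sketch; the file name and flag
# values are assumptions, not taken from the original):
#
#   accelerate launch checkpointing_script.py --num_epochs 2 --output_dir ./ckpts
#   accelerate launch checkpointing_script.py --output_dir ./ckpts \
#       --resume_from_checkpoint ./ckpts/epoch_0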
| 42 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    # logits are only pinned for the checkpoints below; skip verification otherwise
    expected_slice = None
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    if expected_slice is not None:
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
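# Example invocation (editor's sketch; the script file name is an assumption):
#
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub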
| 34 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
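    # shape sanity check (editor's addition): an even ksize is bumped to the
    # next odd value, so a 10x10 request yields an 11x11 kernel
    assert gabor_filter_kernel(10, 8, 0, 10, 0, 0).shape == (11, 11)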
| 42 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
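# Typical usage (editor's sketch; the launcher and training-script file names
# are assumptions):
#
#   python xla_spawn.py --num_cores 8 my_tpu_training_script.py --arg1 value1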
| 35 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 42 | 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
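# A minimal usage sketch (editor's addition; assumes a `DatasetInfo` obtained
# elsewhere, e.g. from `huggingface_hub.HfApi().dataset_info(...)`):
#
#   fs = HfFileSystem(repo_info=dataset_info)
#   print(fs.ls(""))                    # top-level files and directories
#   with fs.open("README.md") as f:     # streamed via hf_hub_url + fsspec
#       print(f.read()[:100])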
| 36 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )

        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert (
            image.height == pipe.unet.config.sample_size[0]
            and image.width == pipe.unet.config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 42 | 0 |
import re
from filelock import FileLock
try:
import nltk
UpperCamelCase : Union[str, Any] = True
except (ImportError, ModuleNotFoundError):
UpperCamelCase : Any = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
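if __name__ == "__main__":
    # quick demonstration (editor's addition; requires the nltk punkt data
    # downloaded above)
    print(add_newline_to_end_of_each_sentence("First sentence. Second one.<n>"))
    # First sentence.
    # Second one.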
| 37 |
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase
    # numbers, and special characters


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print("Alternative Password generated:", alternative_password_generator(chars_incl, length))
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
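# Illustrative usage (values invented): alternative_password_generator('@1A' , 16 )
# returns a 16-character password that still contains '@', '1' and 'A', while
# is_strong_password requires at least one upper, lower, digit and punctuation char.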
| 42 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Optional[Any] = logging.get_logger(__name__)
A_ : int = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''mra'''
def __init__( self , __SCREAMING_SNAKE_CASE=5_0_2_6_5 , __SCREAMING_SNAKE_CASE=7_6_8 , __SCREAMING_SNAKE_CASE=1_2 , __SCREAMING_SNAKE_CASE=1_2 , __SCREAMING_SNAKE_CASE=3_0_7_2 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5_1_2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-5 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE="full" , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = vocab_size
snake_case__ : Optional[int] = max_position_embeddings
snake_case__ : List[Any] = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Dict = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : Tuple = attention_probs_dropout_prob
snake_case__ : Dict = initializer_range
snake_case__ : List[str] = type_vocab_size
snake_case__ : str = layer_norm_eps
snake_case__ : Dict = position_embedding_type
snake_case__ : Union[str, Any] = block_per_row
snake_case__ : str = approx_mode
snake_case__ : int = initial_prior_first_n_blocks
snake_case__ : str = initial_prior_diagonal_n_blocks
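# Illustrative usage (not exercised here): upstream this class is transformers'
# MraConfig, so construction mirrors any other PretrainedConfig, e.g.
# MraConfig(num_hidden_layers=6).hidden_size -> 768 (the default above).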
| 38 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = inspect.getfile(accelerate.test_utils )
lowerCamelCase_ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
lowerCamelCase_ = test_metrics
@require_cpu
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
debug_launcher(self.test_metrics.main )
@require_single_gpu
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
self.test_metrics.main()
@require_multi_gpu
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
print(f'''Found {torch.cuda.device_count()} devices.''' )
lowerCamelCase_ = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
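# Illustrative note: debug_launcher above runs the given function under simulated
# multi-process CPU workers, so the same metric checks cover 1 and N processes
# without requiring real GPUs.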
| 42 | 0 |
import numpy as np
def sigmoid (vector ):
    return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit (vector ):
    return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
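# Illustrative worked example (values invented): the second function above is the
# sigmoid approximation of GELU, gelu(x) ~ x * sigmoid(1.702 * x).
if __name__ == "__main__":
    demo_x = np.array([-1.0, 0.0, 1.0])
    print(demo_x / (1 + np.exp(-1.702 * demo_x)))  # approx [-0.154  0.     0.846]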
| 39 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def _UpperCamelCase ( __UpperCamelCase ) -> List[str]:
if hor == 1_28:
lowerCamelCase_ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
lowerCamelCase_ = (32, 1_28, 2_56)
lowerCamelCase_ = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
lowerCamelCase_ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
lowerCamelCase_ = (32, 64, 1_28, 2_56)
lowerCamelCase_ = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
lowerCamelCase_ = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
lowerCamelCase_ = model.state_dict()
lowerCamelCase_ = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 6_55_36,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
lowerCamelCase_ = UNetaDModel(**__UpperCamelCase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
lowerCamelCase_ = dict(zip(model.state_dict().keys() ,hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase_ = state_dict.pop(__UpperCamelCase )
hf_value_function.load_state_dict(__UpperCamelCase )
torch.save(hf_value_function.state_dict() ,f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' ,'w' ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
def _UpperCamelCase ( ) -> Tuple:
lowerCamelCase_ = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 1_28, 2_56),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 6_55_36,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
lowerCamelCase_ = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
lowerCamelCase_ = model
lowerCamelCase_ = UNetaDModel(**__UpperCamelCase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
lowerCamelCase_ = dict(zip(state_dict.keys() ,hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase_ = state_dict.pop(__UpperCamelCase )
hf_value_function.load_state_dict(__UpperCamelCase )
torch.save(hf_value_function.state_dict() ,'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' ,'w' ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
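# Illustrative sketch (names invented): the conversion above relies on both models
# enumerating parameters in the same order, so keys are zipped positionally rather
# than mapped by name. The core trick, standalone:
def _demo_rename(src_state_dict, dst_keys):
    mapping = dict(zip(src_state_dict.keys(), dst_keys))
    return {mapping[key]: value for key, value in src_state_dict.items()}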
| 42 | 0 |
import math
import random
def sigmoid_function ( value : float , deriv : bool = False ) -> float:
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation ( expected : int , number_propagations : int ) -> float:
    # Random weight
    weight = float(2 * (random.randint(1 , 100 )) - 1 )
    for _ in range(number_propagations ):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_a
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_a , True )
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_a * 100
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    expected = int(input('''Expected value: '''))
    number_propagations = int(input('''Number of propagations: '''))
    print(forward_propagation(expected, number_propagations))
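# Illustrative worked step (values invented): with weight = 10 and expected = 100,
# the forward pass is sigmoid(0.02 * 10) = sigmoid(0.2) ~ 0.5498, the miss is
# 1 - 0.5498 = 0.4502, and the delta is 0.4502 * 0.5498 * (1 - 0.5498) ~ 0.1114.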
| 40 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A_ = 250_004
A_ = 250_020
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = MBartTokenizer
SCREAMING_SNAKE_CASE_ = MBartTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ = MBartTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = MBartTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.tokenize('This is a test' )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        lowerCamelCase_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
                'é',
'.',
] , )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase_ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
lowerCamelCase_ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=True
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=False
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'facebook/mbart-large-en-ro'
SCREAMING_SNAKE_CASE_ = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE_ = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
SCREAMING_SNAKE_CASE_ = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]
@classmethod
def UpperCamelCase( cls ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
lowerCamelCase_ = 1
return cls
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250020 )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
self.assertIn(SCREAMING_SNAKE_CASE_ , self.tokenizer.all_special_ids )
lowerCamelCase_ = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
lowerCamelCase_ = self.tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 10
lowerCamelCase_ = self.tokenizer(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250026, 250001] )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = MBartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE_ )
@require_torch
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
lowerCamelCase_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
lowerCamelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=3 , return_tensors='pt' )
lowerCamelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=10 , return_tensors='pt' )
lowerCamelCase_ = targets['input_ids']
lowerCamelCase_ = shift_tokens_right(SCREAMING_SNAKE_CASE_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
# A, test, EOS, en_XX
'input_ids': [[62, 3034, 2, 250004]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250001,
} , )
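# Illustrative sketch (ignores padding, names invented): mBART's shift_tokens_right
# moves the trailing language code to the front so decoding starts from it, which is
# what the decoder_input_ids assertions above verify.
def _demo_shift(ids: list) -> list:
    return [ids[-1]] + ids[:-1]  # [tok.., eos, LANG] -> [LANG, tok.., eos]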
| 42 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
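# Illustrative note: the try/except above is the standard diffusers soft-dependency
# guard; when torch or transformers is missing, dummy placeholder objects that raise
# on use are exported instead of the real pipelines.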
| 41 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = 'ylacombe/bark-small'
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = 'en_speaker_1'
lowerCamelCase_ = 'This is a test string'
lowerCamelCase_ = 'speaker_embeddings_path.json'
lowerCamelCase_ = 'speaker_embeddings'
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowerCamelCase_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCamelCase_ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowerCamelCase_ = 35
lowerCamelCase_ = 2
lowerCamelCase_ = 8
lowerCamelCase_ = {
'semantic_prompt': np.ones(SCREAMING_SNAKE_CASE_ ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowerCamelCase_ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowerCamelCase_ = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowerCamelCase_ = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = processor(text=self.input_string )
lowerCamelCase_ = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
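# Illustrative note (shapes follow the test above): a Bark voice preset is three
# aligned numpy arrays, a 1-D semantic prompt plus (2, seq_len) coarse and
# (8, seq_len) fine prompts, whether passed as a dict, an .npz file or a hub name.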
| 42 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
lowerCAmelCase = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
lowerCAmelCase = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
lowerCAmelCase = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
lowerCAmelCase = OrderedDict(
[
        # Model for Image-classification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
lowerCAmelCase = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
lowerCAmelCase = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
lowerCAmelCase = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
lowerCAmelCase = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
lowerCAmelCase = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
lowerCAmelCase = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
lowerCAmelCase = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
lowerCAmelCase = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
lowerCAmelCase = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class _a ( _BaseAutoModelClass ):
_lowercase : Dict = FLAX_MODEL_MAPPING
lowerCAmelCase = auto_class_update(FlaxAutoModel)
class _a ( _BaseAutoModelClass ):
_lowercase : Any = FLAX_MODEL_FOR_PRETRAINING_MAPPING
lowerCAmelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class _a ( _BaseAutoModelClass ):
_lowercase : str = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
lowerCAmelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class _a ( _BaseAutoModelClass ):
_lowercase : str = FLAX_MODEL_FOR_MASKED_LM_MAPPING
lowerCAmelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class _a ( _BaseAutoModelClass ):
_lowercase : List[Any] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class _a ( _BaseAutoModelClass ):
_lowercase : str = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCAmelCase = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class _a ( _BaseAutoModelClass ):
_lowercase : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
lowerCAmelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class _a ( _BaseAutoModelClass ):
_lowercase : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCAmelCase = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class _a ( _BaseAutoModelClass ):
_lowercase : str = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
lowerCAmelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class _a ( _BaseAutoModelClass ):
_lowercase : Optional[Any] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
lowerCAmelCase = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class _a ( _BaseAutoModelClass ):
_lowercase : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCAmelCase = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class _a ( _BaseAutoModelClass ):
_lowercase : int = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class _a ( _BaseAutoModelClass ):
_lowercase : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
lowerCAmelCase = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
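# Illustrative usage (checkpoint name is an example, not exercised here): each auto
# class dispatches on the checkpoint's config type through the lazy mappings above,
# e.g. FlaxAutoModel.from_pretrained('bert-base-cased') resolves to FlaxBertModel.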
| 43 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
A_ = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
A_ = {
"allenai/longformer-base-4096": 4_096,
"allenai/longformer-large-4096": 4_096,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_096,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_096,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _UpperCamelCase ( ) -> Any:
lowerCamelCase_ = (
        list(range(ord('!' ) ,ord('~' ) + 1 ) ) + list(range(ord('¡' ) ,ord('¬' ) + 1 ) ) + list(range(ord('®' ) ,ord('ÿ' ) + 1 ) )
)
lowerCamelCase_ = bs[:]
lowerCamelCase_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCamelCase )
cs.append(2**8 + n )
n += 1
lowerCamelCase_ = [chr(__UpperCamelCase ) for n in cs]
return dict(zip(__UpperCamelCase ,__UpperCamelCase ) )
def _UpperCamelCase ( __UpperCamelCase ) -> List[str]:
lowerCamelCase_ = set()
lowerCamelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase_ = char
return pairs
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask']
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="replace" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as vocab_handle:
lowerCamelCase_ = json.load(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ = errors # how to handle errors in decoding
lowerCamelCase_ = bytes_to_unicode()
lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as merges_handle:
lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1]
lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowerCamelCase_ = {}
lowerCamelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
lowerCamelCase_ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ ,lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
lowerCamelCase_ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = ' '.join(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = word
return word
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
lowerCamelCase_ = []
for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(' ' ) )
return bpe_tokens
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = ''.join(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '\n' )
lowerCamelCase_ = 0
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
lowerCamelCase_ = token_index
writer.write(' '.join(SCREAMING_SNAKE_CASE_ ) + '\n' )
index += 1
return vocab_file, merge_file
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
lowerCamelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()):
lowerCamelCase_ = ' ' + text
return (text, kwargs)
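# Illustrative worked example (names invented): the pair-extraction step of BPE,
# standalone. For ('l', 'o', 'w') the merge candidates are ('l', 'o') and ('o', 'w');
# the loop above repeatedly merges the lowest-ranked pair until none remain.
def _demo_get_pairs(word: tuple) -> set:
    return {(first, second) for first, second in zip(word, word[1:])}

# _demo_get_pairs(('l', 'o', 'w')) == {('l', 'o'), ('o', 'w')}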
| 42 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase__ ( A ):
@staticmethod
@abstractmethod
def lowerCamelCase_ ( __A : ArgumentParser ):
raise NotImplementedError()
@abstractmethod
def lowerCamelCase_ ( self : int ):
raise NotImplementedError()
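# Illustrative sketch (names invented; upstream the ABC above is
# BaseTransformersCLICommand): a concrete command registers its own subparser and
# then does its work in run().
class _EchoCommand(ABC):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        parser.add_argument('--message', default='hi')  # hypothetical flag

    def run(self):
        print('echo')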
| 44 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
A_ = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
A_ = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
A_ = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = RealmTokenizer
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="[UNK]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[PAD]" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> List[Any]:
'''simple docstring'''
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE_ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE_ ) != tokenize_chinese_chars
):
lowerCamelCase_ = getattr(SCREAMING_SNAKE_CASE_ , normalizer_state.pop('type' ) )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = strip_accents
lowerCamelCase_ = tokenize_chinese_chars
lowerCamelCase_ = normalizer_class(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = do_lower_case
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = PaddingStrategy.MAX_LENGTH
lowerCamelCase_ = text
lowerCamelCase_ = kwargs.pop('text_pair' , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = kwargs.pop('return_tensors' , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(SCREAMING_SNAKE_CASE_ ):
if batch_text_pair is not None:
lowerCamelCase_ = batch_text_pair[idx]
else:
lowerCamelCase_ = None
lowerCamelCase_ = super().__call__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = encoded_candidates.get('input_ids' )
lowerCamelCase_ = encoded_candidates.get('attention_mask' )
lowerCamelCase_ = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(SCREAMING_SNAKE_CASE_ )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(SCREAMING_SNAKE_CASE_ )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {key: item for key, item in output_data.items() if len(SCREAMING_SNAKE_CASE_ ) != 0}
return BatchEncoding(SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
lowerCamelCase_ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
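# Illustrative usage (shapes per the upstream docs, not exercised here): the
# candidate-encoding method above pads every candidate list to max_length, so
# batch_encode_candidates([['cand 1a', 'cand 1b']], max_length=10,
# return_tensors='np').input_ids has shape (num_examples, num_candidates, 10).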
| 42 | 0 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main ( ) -> None:
    parser = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""transformers-cli command helpers""" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
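# Hedged usage sketch (assumes the package installs this entry point as
# `transformers-cli`, matching the usage string passed to ArgumentParser above):
#
#   transformers-cli env            # dispatches to EnvironmentCommand
#   transformers-cli download gpt2  # dispatches to DownloadCommand
#
# Each `register_subcommand` call adds an argparse subparser whose `func` default
# builds the command object that the final `args.func(...)` / `service.run()` pair
# instantiates and executes.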
| 45 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
lowerCamelCase_ = str(__UpperCamelCase )
return len(__UpperCamelCase ) == 9 and set(__UpperCamelCase ) == set('123456789' )
def _UpperCamelCase ( ) -> int | None:
for base_num in range(99_99 ,49_99 ,-1 ):
lowerCamelCase_ = 10_00_02 * base_num
if is_9_pandigital(__UpperCamelCase ):
return candidate
for base_num in range(3_33 ,99 ,-1 ):
lowerCamelCase_ = 1_00_20_03 * base_num
if is_9_pandigital(__UpperCamelCase ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''')
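# Why the multipliers above work (illustrative): for a 4-digit n whose double has
# 5 digits, 100002 * n == n * 10**5 + 2 * n, i.e. the concatenation of n and 2n;
# likewise 1002003 * n == n * 10**6 + 2n * 10**3 + 3n concatenates n, 2n, 3n for
# suitable 3-digit n. Quick sanity check:
# n = 9999
# assert 100002 * n == int(str(n) + str(2 * n))  # 999919998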
| 42 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Dict = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCAmelCase : str = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase : int = state_dict.pop(_lowerCamelCase )
_lowerCamelCase : str = val
def lowerCamelCase_( _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Dict = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_lowerCamelCase : Dict = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
_lowerCamelCase : str = value
else:
_lowerCamelCase : Any = value
return new_state_dict
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : str = ""
if is_panoptic:
_lowerCamelCase : Any = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_lowerCamelCase : Optional[int] = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
_lowerCamelCase : Union[str, Any] = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : int = in_proj_weight[:256, :]
_lowerCamelCase : Dict = in_proj_bias[:256]
_lowerCamelCase : int = in_proj_weight[256:512, :]
_lowerCamelCase : Tuple = in_proj_bias[256:512]
_lowerCamelCase : Tuple = in_proj_weight[-256:, :]
_lowerCamelCase : Optional[int] = in_proj_bias[-256:]
def lowerCamelCase_( ) -> int:
'''simple docstring'''
_lowerCamelCase : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase : Optional[int] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
_lowerCamelCase : Optional[int] = "resnet101"
if "dc5" in model_name:
_lowerCamelCase : List[Any] = True
_lowerCamelCase : Optional[int] = "panoptic" in model_name
if is_panoptic:
_lowerCamelCase : Optional[int] = 250
else:
_lowerCamelCase : Optional[Any] = 91
_lowerCamelCase : List[Any] = "huggingface/label-files"
_lowerCamelCase : List[Any] = "coco-detection-id2label.json"
_lowerCamelCase : Union[str, Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : str = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Any = idalabel
_lowerCamelCase : List[str] = {v: k for k, v in idalabel.items()}
# load image processor
_lowerCamelCase : Any = "coco_panoptic" if is_panoptic else "coco_detection"
_lowerCamelCase : Tuple = ConditionalDetrImageProcessor(format=_lowerCamelCase )
# prepare image
_lowerCamelCase : List[Any] = prepare_img()
_lowerCamelCase : str = image_processor(images=_lowerCamelCase , return_tensors="pt" )
_lowerCamelCase : Union[str, Any] = encoding["pixel_values"]
logger.info(F"""Converting model {model_name}...""" )
# load original model from torch hub
_lowerCamelCase : int = torch.hub.load("DeppMeng/ConditionalDETR" , _lowerCamelCase , pretrained=_lowerCamelCase ).eval()
_lowerCamelCase : Optional[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
_lowerCamelCase : Tuple = "conditional_detr." + src
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : int = rename_backbone_keys(_lowerCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowerCamelCase , is_panoptic=_lowerCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_lowerCamelCase : Tuple = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
_lowerCamelCase : List[Any] = state_dict.pop(_lowerCamelCase )
_lowerCamelCase : Optional[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_lowerCamelCase : List[str] = state_dict.pop(_lowerCamelCase )
_lowerCamelCase : Tuple = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
_lowerCamelCase : int = state_dict.pop(_lowerCamelCase )
_lowerCamelCase : Optional[int] = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
_lowerCamelCase : int = state_dict.pop(_lowerCamelCase )
_lowerCamelCase : Dict = val
# finally, create HuggingFace model and load state dict
_lowerCamelCase : Any = ConditionalDetrForSegmentation(_lowerCamelCase ) if is_panoptic else ConditionalDetrForObjectDetection(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
model.push_to_hub(repo_id=_lowerCamelCase , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
_lowerCamelCase : List[Any] = conditional_detr(_lowerCamelCase )
_lowerCamelCase : Optional[Any] = model(_lowerCamelCase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1e-4 )
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_lowerCAmelCase : Tuple = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
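# Hedged invocation sketch (the script filename and output path below are
# placeholders, not taken from this file):
#
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50
#
# The run pulls the original weights from torch.hub, renames keys, splits the
# fused in_proj matrices into separate q/k/v projections, checks logits and boxes
# against the original model, and saves the converted checkpoint plus processor.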
| 46 |
'''simple docstring'''
A_ = "Input must be a string of 8 numbers plus letter"
A_ = "TRWAGMYFPDXBNJZSQVHLCKE"
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowerCamelCase_ = f'''Expected string as input, found {type(__UpperCamelCase ).__name__}'''
raise TypeError(__UpperCamelCase )
lowerCamelCase_ = spanish_id.replace('-' ,'' ).upper()
if len(__UpperCamelCase ) != 9:
raise ValueError(__UpperCamelCase )
try:
lowerCamelCase_ = int(spanish_id_clean[0:8] )
lowerCamelCase_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__UpperCamelCase ) from ex
if letter.isdigit():
raise ValueError(__UpperCamelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
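# Illustrative checksum walk-through using the classic sample DNI "12345678Z":
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == 'Z', so the id validates; any
# other trailing letter would fail the final comparison above.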
| 42 | 0 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCAmelCase__ ( lowerCamelCase_ : BertModel , lowerCamelCase_ : str , lowerCamelCase_ : str ):
__a : str = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
__a : int = (
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
if not os.path.isdir(lowerCamelCase_ ):
os.makedirs(lowerCamelCase_ )
__a : Union[str, Any] = model.state_dict()
def to_tf_var_name(lowerCamelCase_ : str ):
for patt, repl in iter(lowerCamelCase_ ):
__a : Union[str, Any] = name.replace(lowerCamelCase_ , lowerCamelCase_ )
return f'''bert/{name}'''
def create_tf_var(lowerCamelCase_ : np.ndarray , lowerCamelCase_ : str , lowerCamelCase_ : tf.Session ):
__a : List[Any] = tf.dtypes.as_dtype(tensor.dtype )
__a : List[str] = tf.get_variable(dtype=lowerCamelCase_ , shape=tensor.shape , name=lowerCamelCase_ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(lowerCamelCase_ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
__a : Optional[Any] = to_tf_var_name(lowerCamelCase_ )
__a : Optional[Any] = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
__a : Union[str, Any] = torch_tensor.T
__a : str = create_tf_var(tensor=lowerCamelCase_ , name=lowerCamelCase_ , session=lowerCamelCase_ )
tf.keras.backend.set_value(lowerCamelCase_ , lowerCamelCase_ )
__a : Union[str, Any] = session.run(lowerCamelCase_ )
print(f'''Successfully created {tf_name}: {np.allclose(lowerCamelCase_ , lowerCamelCase_ )}''' )
__a : Any = tf.train.Saver(tf.trainable_variables() )
saver.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , model_name.replace('-' , '_' ) + '.ckpt' ) )
def UpperCAmelCase__ ( lowerCamelCase_ : Union[str, Any]=None ):
__a : str = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='model name e.g. bert-base-uncased' )
parser.add_argument(
'--cache_dir' , type=lowerCamelCase_ , default=lowerCamelCase_ , required=lowerCamelCase_ , help='Directory containing pytorch model' )
parser.add_argument('--pytorch_model_path' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='/path/to/<pytorch-model-name>.bin' )
parser.add_argument('--tf_cache_dir' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='Directory in which to save tensorflow model' )
__a : List[Any] = parser.parse_args(lowerCamelCase_ )
__a : Optional[Any] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=lowerCamelCase_ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
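# Hedged invocation sketch (script name and paths are placeholders):
#
#   python convert_bert_pytorch_checkpoint_to_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_out
#
# Note the transposes applied to tensors_to_transpose: TF stores dense kernels as
# [in_features, out_features], the transpose of PyTorch's Linear weight layout.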
| 47 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = GPTSanJapaneseTokenizer
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = {'do_clean_text': False, 'add_prefix_space': False}
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# fmt: off
lowerCamelCase_ = ['ใใ', 'ใใใซ', 'ใซใกใฏ', 'ใฐใใฏ', 'ไธ็,ใบ็', 'ใ', 'ใ', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
lowerCamelCase_ = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # ๐
lowerCamelCase_ = {'unk_token': '<unk>'}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(SCREAMING_SNAKE_CASE_ ) )
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = 'ใใใซใกใฏใไธ็ใ \nใใใฐใใฏใใบ็ใ๐'
lowerCamelCase_ = 'ใใใซใกใฏใไธ็ใ \nใใใฐใใฏใไธ็ใ๐'
return input_text, output_text
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.get_input_output_texts(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
return text, ids
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
# Testing tokenization
lowerCamelCase_ = 'ใใใซใกใฏใไธ็ใใใใใฐใใฏใใบ็ใ'
lowerCamelCase_ = ['ใใ', 'ใซใกใฏ', 'ใ', 'ไธ็', 'ใ', '<SP>', 'ใใ', 'ใฐใใฏ', 'ใ', 'ใบ็', 'ใ']
lowerCamelCase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids without special tokens
lowerCamelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids with special tokens
lowerCamelCase_ = tokens + [tokenizer.unk_token]
lowerCamelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
# Testing tokenization
lowerCamelCase_ = 'ใใใซใกใฏใ<|bagoftoken|>ไธ็ใใใใฐใใฏใ<|bagoftoken|>ใบ็ใ'
lowerCamelCase_ = 'ใใใซใกใฏใใใใไธ็ใใใใฐใใฏใใใใไธ็ใ'
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
lowerCamelCase_ = 'ใใใซใกใฏใไธ็ใ'
lowerCamelCase_ = 'ใใใฐใใฏใใบ็ใ๐'
lowerCamelCase_ = 'ใใใซใกใฏใไธ็ใใใใฐใใฏใไธ็ใ๐'
lowerCamelCase_ = tokenizer.encode(prefix_text + input_text )
lowerCamelCase_ = tokenizer.encode('' , prefix_text=prefix_text + input_text )
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
lowerCamelCase_ = 'ใใใซใกใฏใไธ็ใ'
lowerCamelCase_ = 'ใใใฐใใฏใใบ็ใ๐'
lowerCamelCase_ = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2
lowerCamelCase_ = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2
lowerCamelCase_ = [1] + [0] * (len_prefix + len_text + 1)
lowerCamelCase_ = [1] * (len_prefix + len_text + 1) + [0]
lowerCamelCase_ = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCamelCase_ = tokenizer(prefix_text + input_text ).token_type_ids
lowerCamelCase_ = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ ).token_type_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
lowerCamelCase_ = tokenizer.encode('ใใณใใฏ' )
lowerCamelCase_ = tokenizer.encode('' , prefix_text='ใใณใใฏ' )
lowerCamelCase_ = tokenizer.encode('ใใฏ' , prefix_text='ใใณ' )
self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
lowerCamelCase_ = [['ๆญฆ็ฐไฟก็', 'ใฏใ'], ['็น็ฐไฟก้ท', 'ใฎ้
ไธใฎใ']]
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
# fmt: off
lowerCamelCase_ = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
lowerCamelCase_ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCamelCase_ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token.token_type_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token.attention_mask , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.input_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.token_type_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.attention_mask , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
| 42 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :str = 'fnet'
def __init__( self : Optional[Any] , __magic_name__ : Tuple=32000 , __magic_name__ : str=768 , __magic_name__ : int=12 , __magic_name__ : Any=3072 , __magic_name__ : str="gelu_new" , __magic_name__ : Dict=0.1 , __magic_name__ : int=512 , __magic_name__ : int=4 , __magic_name__ : List[Any]=0.02 , __magic_name__ : Tuple=1E-12 , __magic_name__ : List[str]=False , __magic_name__ : Union[str, Any]=512 , __magic_name__ : str=3 , __magic_name__ : Tuple=1 , __magic_name__ : Union[str, Any]=2 , **__magic_name__ : Tuple , ):
"""simple docstring"""
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = use_tpu_fourier_optimizations
lowerCAmelCase__ = tpu_short_seq_length
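# Minimal usage sketch (assuming this config class is exported as FNetConfig, as
# the archive map above suggests):
#
#   from transformers import FNetConfig  # assumed import path
#   config = FNetConfig()  # defaults above: vocab_size=32000, hidden_size=768
#   config.use_tpu_fourier_optimizations  # defaults to False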
| 48 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
A_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Dict:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ = "auto" ) -> List[str]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> List[str]:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = 1
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(SCREAMING_SNAKE_CASE_ )}.''' )
# get prompt text embeddings
lowerCamelCase_ = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
lowerCamelCase_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowerCamelCase_ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowerCamelCase_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = text_embeddings.shape
lowerCamelCase_ = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 )
lowerCamelCase_ = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ = 42
if negative_prompt is None:
lowerCamelCase_ = ['']
elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !='''
f''' {type(SCREAMING_SNAKE_CASE_ )}.''' )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = [negative_prompt]
elif batch_size != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
' the batch size of `prompt`.' )
else:
lowerCamelCase_ = negative_prompt
lowerCamelCase_ = text_input_ids.shape[-1]
lowerCamelCase_ = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , )
lowerCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ = uncond_embeddings.shape[1]
lowerCamelCase_ = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
lowerCamelCase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowerCamelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCamelCase_ = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(self.device )
lowerCamelCase_ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(
self.device )
else:
lowerCamelCase_ = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowerCamelCase_ = latents_reference.to(self.device )
lowerCamelCase_ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowerCamelCase_ = (latents_shape[3] - latents_shape_reference[3]) // 2
lowerCamelCase_ = (latents_shape[2] - latents_shape_reference[2]) // 2
lowerCamelCase_ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowerCamelCase_ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowerCamelCase_ = 0 if dx < 0 else dx
lowerCamelCase_ = 0 if dy < 0 else dy
lowerCamelCase_ = max(-dx , 0 )
lowerCamelCase_ = max(-dy , 0 )
# import pdb
# pdb.set_trace()
lowerCamelCase_ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCamelCase_ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase_ = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_eta:
lowerCamelCase_ = eta
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
lowerCamelCase_ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCamelCase_ ,lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 1 / 0.18_215 * latents
lowerCamelCase_ = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
lowerCamelCase_ = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) , return_tensors='pt' ).to(
self.device )
lowerCamelCase_ ,lowerCamelCase_ = self.safety_checker(
images=SCREAMING_SNAKE_CASE_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowerCamelCase_ = None
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
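# Hedged usage sketch (model id and custom_pipeline reference are assumptions;
# this mirrors how diffusers loads community pipelines such as this seed-resize
# variant, and a local path to this file should also work):
#
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", custom_pipeline="seed_resize_stable_diffusion"
#   )
#   image = pipe("an astronaut riding a horse", height=512, width=768).images[0]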
| 42 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Optional[Any] = "bert-generation"
def __init__( self : Any , _lowercase : List[Any]=5_03_58 , _lowercase : str=10_24 , _lowercase : str=24 , _lowercase : int=16 , _lowercase : Any=40_96 , _lowercase : Union[str, Any]="gelu" , _lowercase : str=0.1 , _lowercase : Union[str, Any]=0.1 , _lowercase : Optional[Any]=5_12 , _lowercase : str=0.02 , _lowercase : Dict=1E-12 , _lowercase : int=0 , _lowercase : Optional[int]=2 , _lowercase : Dict=1 , _lowercase : Dict="absolute" , _lowercase : Tuple=True , **_lowercase : str , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
| 49 |
'''simple docstring'''
import pprint
import requests
A_ = "https://zenquotes.io/api"
def _UpperCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def _UpperCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
A_ = random_quotes()
pprint.pprint(response)
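# Response shape (as documented by zenquotes.io; assumed here, verify before use):
# both endpoints return a JSON list of objects like
#   [{"q": "<quote text>", "a": "<author>", "h": "<HTML-rendered quote>"}]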
| 42 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase : Any = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[str] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[str] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=33 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ) -> int:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = EsmModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = EsmForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = EsmForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = ()
SCREAMING_SNAKE_CASE_ = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = EsmModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase_ = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = EsmModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase_ = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase_ = create_position_ids_from_input_ids(SCREAMING_SNAKE_CASE_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.empty(2 , 4 , 30 )
lowerCamelCase_ = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase_ = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase_ = embeddings.create_position_ids_from_inputs_embeds(SCREAMING_SNAKE_CASE_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
with torch.no_grad():
lowerCamelCase_ = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCamelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
lowerCamelCase_ = 33
lowerCamelCase_ = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@slow
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
lowerCamelCase_ = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCamelCase_ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
# compare the actual values for a slice.
lowerCamelCase_ = torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 42 | 0 |
'''simple docstring'''
from __future__ import annotations
def __snake_case ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ) -> float:
"""simple docstring"""
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def __snake_case ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , ) -> float:
"""simple docstring"""
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def __snake_case ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , ) -> float:
"""simple docstring"""
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
SCREAMING_SNAKE_CASE_ , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
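# Worked example of the formulas above, assuming a 10% per-period rate on a
# principal of 1000: simple interest over 2 periods is 1000 * 0.10 * 2 == 200.0,
# while compounding over the same 2 periods yields
# 1000 * ((1 + 0.10) ** 2 - 1) ~= 210.0 (up to float rounding).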
| 51 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
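# Illustration (added, assumption-flagged): with the lazy structure above, the
# heavy framework imports are deferred until an attribute is first touched:
#
#   import transformers.models.resnet as resnet_module
#   model_cls = resnet_module.ResNetModel  # the real import happens here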
| 42 | 0 |
"""simple docstring"""
def solution(num: int) -> int:
    """Return the largest number that can be made by deleting exactly one digit."""
    if not isinstance(num, int):
        raise TypeError('only integers accepted as input')
    num_string = str(abs(num))
    num_transpositions = [list(num_string) for _ in range(len(num_string))]
    for index in range(len(num_string)):
        num_transpositions[index].pop(index)
    return max(
        int(''.join(transposition)) for transposition in num_transpositions)
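# Quick sanity sketch (added, not in the original): deleting one digit of 1234
# leaves {234, 134, 124, 123}, so the maximum is 234.
assert solution(1234) == 234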
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 52 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}
class XLMProphetNetConfig(PretrainedConfig):
    """Configuration class for XLM-ProphetNet (names restored from the obfuscated dump)."""

    model_type = 'xlm-prophetnet'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'num_encoder_attention_heads',
    }
    def __init__(
        self,
        activation_dropout=0.1,
        activation_function="gelu",
        vocab_size=30522,
        hidden_size=1024,
        encoder_ffn_dim=4096,
        num_encoder_layers=12,
        num_encoder_attention_heads=16,
        decoder_ffn_dim=4096,
        num_decoder_layers=12,
        num_decoder_attention_heads=16,
        attention_dropout=0.1,
        dropout=0.1,
        max_position_embeddings=512,
        init_std=0.02,
        is_encoder_decoder=True,
        add_cross_attention=True,
        decoder_start_token_id=0,
        ngram=2,
        num_buckets=32,
        relative_max_distance=128,
        disable_ngram_loss=False,
        eps=0.0,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
            ' `num_decoder_layers`.')
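# Hedged usage sketch (added; the config class is real, the override values
# below are merely illustrative):
#
#   config = XLMProphetNetConfig(vocab_size=250012, num_encoder_layers=6)
#   assert config.num_hidden_layers == 6 + config.num_decoder_layers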
| 42 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    """Builds small MaskFormer configs and inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)
    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size))
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4))
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)
            comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*inputs)
    @unittest.skip(reason='MaskFormer does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MaskFormer does not have a get_input_embeddings method')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MaskFormer is not a generative model')
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason='MaskFormer does not use token embeddings')
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            'pixel_values': torch.randn((2, 3, *size), device=torch_device),
            'mask_labels': torch.randn((2, 10, *size), device=torch_device),
            'class_labels': torch.zeros(2, 10, device=torch_device).long(),
        }
        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config)
        model.to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds; the original implementation doesn't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco')
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors='pt').to(torch_device)
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))
        with torch.no_grad():
            outputs = model(**inputs)
        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors='pt').to(torch_device)
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))
        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff')
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors='pt').to(torch_device)
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))
        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors='pt',
        )
        inputs['pixel_values'] = inputs['pixel_values'].to(torch_device)
        inputs['mask_labels'] = [el.to(torch_device) for el in inputs['mask_labels']]
        inputs['class_labels'] = [el.to(torch_device) for el in inputs['class_labels']]
        with torch.no_grad():
            outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
| 53 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
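# Sanity sketch (added for illustration): the origin never diverges, so it is
# classified as inside the set, while a point far from the set escapes quickly.
#   get_distance(0, 0, 50) == 1.0      # stays bounded for all 50 steps
#   get_distance(2, 1, 50) < 0.1       # diverges almost immediately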
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 42 | 0 |
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Extended Euclid: returns (x, y) with a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n_a: int, r_a: int, n_b: int, r_b: int) -> int:
    """Solve x % n_a == r_a and x % n_b == r_b for coprime moduli."""
    (x, y) = extended_euclid(n_a, n_b)
    m = n_a * n_b
    n = r_b * x * n_a + r_a * y * n_b
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n_a: int, r_a: int, n_b: int, r_b: int) -> int:
    """Same construction as above, but built from modular inverses."""
    x, y = invert_modulo(n_a, n_b), invert_modulo(n_b, n_a)
    m = n_a * n_b
    n = r_b * x * n_a + r_a * y * n_b
    return (n % m + m) % m
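# Worked example (added; numbers chosen by hand): the unique x in [0, 15)
# with x % 5 == 1 and x % 3 == 2 is 11, and both constructions agree:
#   chinese_remainder_theorem(5, 1, 3, 2) == 11
#   chinese_remainder_theorem2(5, 1, 3, 2) == 11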
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
| 54 |
'''simple docstring'''
from math import isclose, sqrt
def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    # gradient of the normal at the point of incidence
    normal_gradient = point_y / 4 / point_x
    # s_2 = sin(2 * theta), c_2 = cos(2 * theta) for the reflection
    s_2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c_2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s_2 - c_2 * incoming_gradient) / (c_2 + s_2 * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections
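def _check_on_ellipse(x: float, y: float, tol: float = 1e-6) -> bool:
    # Added sanity sketch: every reflection point produced by next_point should
    # stay on the ellipse 4x^2 + y^2 = 100 (the starting point (1.4, -9.6) does).
    return abs(4 * x * x + y * y - 100) < tol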
if __name__ == "__main__":
print(f'''{solution() = }''')
| 42 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Map the original DiT/BEiT checkpoint keys to transformers names."""
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'''{prefix}cls_token''', "beit.embeddings.cls_token"),
(F'''{prefix}patch_embed.proj.weight''', "beit.embeddings.patch_embeddings.projection.weight"),
(F'''{prefix}patch_embed.proj.bias''', "beit.embeddings.patch_embeddings.projection.bias"),
(F'''{prefix}pos_embed''', "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split the fused qkv projection into separate query/key/value weights."""
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f'{prefix}blocks.{i}.attn.qkv.weight')
        q_bias = state_dict.pop(f'{prefix}blocks.{i}.attn.q_bias')
        v_bias = state_dict.pop(f'{prefix}blocks.{i}.attn.v_bias')
        state_dict[f'beit.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'beit.encoder.layer.{i}.attention.attention.query.bias'] = q_bias
        state_dict[f'beit.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'beit.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'beit.encoder.layer.{i}.attention.attention.value.bias'] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f'{prefix}blocks.{i}.gamma_1')
        gamma_2 = state_dict.pop(f'{prefix}blocks.{i}.gamma_2')
        state_dict[f'beit.encoder.layer.{i}.lambda_1'] = gamma_1
        state_dict[f'beit.encoder.layer.{i}.lambda_2'] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original DiT weights into our BEiT structure."""
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False)
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 55 |
'''simple docstring'''
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_31_70_44_06_46_79_88_73_85_96_19_81 and not allow_probable:
raise ValueError(
'Warning: upper bound of deterministic test is exceeded. '
'Pass allow_probable=True to allow probabilistic test. '
'A return value of True indicates a probable prime.' )
# array bounds provided by analysis
    bounds = [
20_47,
1_37_36_53,
25_32_60_01,
32_15_03_17_51,
2_15_23_02_89_87_47,
3_47_47_49_66_03_83,
3_41_55_00_71_72_83_21,
1,
3_82_51_23_05_65_46_41_30_51,
1,
1,
31_86_65_85_78_34_03_11_51_16_74_61,
3_31_70_44_06_46_79_88_73_85_96_19_81,
]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
if n < _p:
# then we have our last prime to check
            plist = primes[:idx]
break
    d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
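# Usage sketch (added): the test is deterministic up to the last bound above;
# e.g. miller_rabin(97) is True (97 is prime), while miller_rabin(561) is
# False (561 is a Carmichael number, correctly rejected below).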
def test_miller_rabin() -> None:
assert not miller_rabin(5_61 )
assert miller_rabin(5_63 )
# 2047
assert not miller_rabin(83_82_01 )
assert miller_rabin(83_82_07 )
# 1_373_653
assert not miller_rabin(17_31_60_01 )
assert miller_rabin(17_31_60_17 )
# 25_326_001
assert not miller_rabin(30_78_38_66_41 )
assert miller_rabin(30_78_38_66_53 )
# 3_215_031_751
assert not miller_rabin(1_71_30_45_57_48_01 )
assert miller_rabin(1_71_30_45_57_48_19 )
# 2_152_302_898_747
assert not miller_rabin(2_77_97_99_72_83_07 )
assert miller_rabin(2_77_97_99_72_83_27 )
# 3_474_749_660_383
assert not miller_rabin(1_13_85_00_23_90_94_41 )
assert miller_rabin(1_13_85_00_23_90_95_27 )
# 341_550_071_728_321
assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 )
assert miller_rabin(1_27_50_41_01_88_48_80_43_91 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 )
assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 )
assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 42 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch['labels']))  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps)
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load('glue', 'mrpc')
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split('epoch_')[1]
        state_epoch_num = ''
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print('resumed checkpoint performance:', accuracy)
        accelerator.print('resumed checkpoint\'s scheduler\'s lr:', lr_scheduler.get_lr()[0])
        accelerator.print('resumed optimizers\'s lr:', optimizer.param_groups[0]['lr'])
        with open(os.path.join(args.output_dir, f'state_{starting_epoch-1}.json'), 'r') as f:
            resumed_state = json.load(f)
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f'epoch_{epoch}'
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state['accuracy'] = accuracy
        state['lr'] = lr_scheduler.get_lr()[0]
        state['optimizer_lr'] = optimizer.param_groups[0]['lr']
        state['epoch'] = epoch
        state['overall_step'] = overall_step
        accelerator.print(f'epoch {epoch}:', state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f'state_{epoch}.json'), 'w') as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path',
        type=str,
        default='bert-base-cased',
        help='Path to pretrained model or model identifier from huggingface.co/models.',
        required=False,
    )
    parser.add_argument(
        '--output_dir',
        type=str,
        default='.',
        help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.',
    )
    parser.add_argument(
        '--resume_from_checkpoint',
        type=str,
        default=None,
        help='If the training should continue from a checkpoint folder.',
    )
    parser.add_argument(
        '--partial_train_epoch',
        type=int,
        default=None,
        help='If passed, the training will stop after this number of epochs.',
    )
    parser.add_argument(
        '--num_epochs',
        type=int,
        default=2,
        help='Number of train epochs.',
    )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 42 | 0 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for the slow XLM BPE tokenizer."""

    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 57 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
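# Small numpy-only sketch (added for illustration): even sizes are bumped to
# the next odd value, so a requested 8x8 kernel actually comes back as 9x9.
#   kernel = gabor_filter_kernel(8, 8, theta=0, lambd=10, gamma=0, psi=0)
#   kernel.shape == (9, 9)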
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 42 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
@lru_cache()
def bytes_to_unicode():
    """Map utf-8 bytes to printable unicode characters for reversible BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
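# Quick sketch (added): get_pairs works on tuples of symbols, e.g.
#   get_pairs(('l', 'o', 'w')) == {('l', 'o'), ('o', 'w')}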
class BartTokenizer(PreTrainedTokenizer):
    """GPT-2-style byte-level BPE tokenizer for BART (names restored from the obfuscated dump)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
with open(_lowercase , encoding="""utf-8""" ) as vocab_handle:
snake_case_ : Optional[Any] = json.load(_lowercase )
snake_case_ : Any = {v: k for k, v in self.encoder.items()}
snake_case_ : Optional[Any] = errors # how to handle errors in decoding
snake_case_ : List[Any] = bytes_to_unicode()
snake_case_ : Tuple = {v: k for k, v in self.byte_encoder.items()}
with open(_lowercase , encoding="""utf-8""" ) as merges_handle:
snake_case_ : str = merges_handle.read().split("""\n""" )[1:-1]
snake_case_ : List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
snake_case_ : List[Any] = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
snake_case_ : Dict = {}
snake_case_ : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case_ : int = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
    @property
    def vocab_size( self ) -> Any:
        '''simple docstring'''
        return len(self.encoder )
    def get_vocab( self ) -> Optional[int]:
        '''simple docstring'''
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ) -> List[str]:
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            word = tuple(new_word )
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = """ """.join(word )
        self.cache[token] = word
        return word
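    # Added walkthrough (illustrative, not from the original file): with merges ranked
    # [("l", "l"), ("ll", "o")], self.bpe("hello") rewrites the symbol tuple
    #   ('h', 'e', 'l', 'l', 'o') -> ('h', 'e', 'll', 'o') -> ('h', 'e', 'llo')
    # and returns "h e llo"; results are memoized per raw token in self.cache.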
    def _tokenize( self , text ) -> List[Any]:
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(""" """ ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ) -> Tuple:
        '''simple docstring'''
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ) -> int:
        '''simple docstring'''
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ) -> List[str]:
        '''simple docstring'''
        text = """""".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        index = 0
        with open(merge_file , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ) -> List[str]:
        '''simple docstring'''
        add_prefix_space = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = """ """ + text
        return (text, kwargs)
| 58 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=DummyObject ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = ['transformers', 'torch', 'note_seq']
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def UpperCamelCase( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def UpperCamelCase( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
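# Added note (illustrative): this dummy class exists so that importing the package
# without `transformers`, `torch`, and `note_seq` installed raises a clear
# "requires backends" error when the object is actually used, instead of an
# ImportError at import time.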
| 42 | 0 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester :
'''simple docstring'''
def __init__(self : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=14 , UpperCAmelCase_ : Tuple=7 , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : List[str]=32 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Any=0.02 , ) ->Any:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =parent
lowerCamelCase__: Dict =batch_size
lowerCamelCase__: Tuple =seq_length
lowerCamelCase__: Union[str, Any] =is_training
lowerCamelCase__: Tuple =use_input_mask
lowerCamelCase__: List[Any] =use_token_type_ids
lowerCamelCase__: Union[str, Any] =use_labels
lowerCamelCase__: Dict =vocab_size
lowerCamelCase__: Optional[Any] =hidden_size
lowerCamelCase__: str =rotary_dim
lowerCamelCase__: Dict =num_hidden_layers
lowerCamelCase__: List[str] =num_attention_heads
lowerCamelCase__: List[str] =intermediate_size
lowerCamelCase__: Optional[Any] =hidden_act
lowerCamelCase__: int =hidden_dropout_prob
lowerCamelCase__: Optional[Any] =attention_probs_dropout_prob
lowerCamelCase__: Any =max_position_embeddings
lowerCamelCase__: Optional[int] =initializer_range
lowerCamelCase__: str =None
lowerCamelCase__: str =vocab_size - 1
lowerCamelCase__: List[Any] =vocab_size - 1
lowerCamelCase__: int =vocab_size - 1
    def prepare_config_and_inputs(self ) ->Optional[int]:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=False , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self ) ->str:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self , model_class_name , config , input_ids , attention_mask ) ->Optional[Any]:
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config)
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4")
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:] , attention_mask=attention_mask , past_key_values=outputs_cache.past_key_values , position_ids=position_ids , )
        outputs = model(input_ids)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""")
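    # Added note (illustrative): the check above decodes in two phases (prefix, then
    # the final token fed with past_key_values) and requires the last-step logits to
    # match a single full forward pass to within a 1e-3 max-abs difference.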
    def check_use_cache_forward_with_attn_mask(self , model_class_name , config , input_ids , attention_mask ) ->Union[str, Any]:
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config)
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , )
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask_cache , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=attention_mask_cache , position_ids=position_ids , )
        outputs = model(input_ids , attention_mask=attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""")
@require_flax
class FlaxGPTJModelTest ( FlaxModelTesterMixin , FlaxGenerationTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self ) ->Any:
        '''simple docstring'''
        self.model_tester = FlaxGPTJModelTester(self)
    def test_use_cache_forward(self ) ->str:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            config , input_ids , attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name , config , input_ids , attention_mask)
    def test_use_cache_forward_with_attn_mask(self ) ->Optional[int]:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            config , input_ids , attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name , config , input_ids , attention_mask)
@tooslow
def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =GPTaTokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left")
lowerCamelCase__: Any =tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_)
lowerCamelCase__: str =FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
lowerCamelCase__: Union[str, Any] =False
lowerCamelCase__: List[str] =model.config.eos_token_id
lowerCamelCase__: Optional[Any] =jax.jit(model.generate)
lowerCamelCase__: List[str] =jit_generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id).sequences
lowerCamelCase__: Tuple =tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_)
lowerCamelCase__: Dict =[
"Hello this is a long string of text.\n\nI'm trying to get the text of the",
"Hey, I'm a little late to the party. I'm going to",
]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE_ (self : int) ->List[Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
lowerCamelCase__: List[Any] =self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: List[Any] ={k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCamelCase__: Optional[Any] =model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase__: List[Any] =getattr(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__ , lowerCamelCase__: List[Any] =pt_inputs["input_ids"].shape
lowerCamelCase__: Optional[Any] =np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(UpperCAmelCase_):
lowerCamelCase__: str =0
lowerCamelCase__: Optional[Any] =1
lowerCamelCase__: int =0
lowerCamelCase__: Union[str, Any] =1
lowerCamelCase__: Dict =pt_model_class(UpperCAmelCase_).eval()
lowerCamelCase__: Any =model_class(UpperCAmelCase_ , dtype=jnp.floataa)
lowerCamelCase__: str =convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase_)
lowerCamelCase__: List[str] =fx_state
with torch.no_grad():
lowerCamelCase__: Optional[int] =pt_model(**UpperCAmelCase_).to_tuple()
lowerCamelCase__: Dict =fx_model(**UpperCAmelCase_).to_tuple()
self.assertEqual(len(UpperCAmelCase_) , len(UpperCAmelCase_) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =model_class.from_pretrained(UpperCAmelCase_ , from_pt=UpperCAmelCase_)
lowerCamelCase__: str =fx_model_loaded(**UpperCAmelCase_).to_tuple()
self.assertEqual(
len(UpperCAmelCase_) , len(UpperCAmelCase_) , "Output lengths differ between Flax and PyTorch")
for fx_output_loaded, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2)
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
lowerCamelCase__: Union[str, Any] =self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Dict ={k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCamelCase__: Optional[int] =model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase__: List[Any] =getattr(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =pt_model_class(UpperCAmelCase_).eval()
lowerCamelCase__: str =model_class(UpperCAmelCase_ , dtype=jnp.floataa)
lowerCamelCase__: Union[str, Any] =load_flax_weights_in_pytorch_model(UpperCAmelCase_ , fx_model.params)
lowerCamelCase__ , lowerCamelCase__: Optional[Any] =pt_inputs["input_ids"].shape
lowerCamelCase__: List[Any] =np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(UpperCAmelCase_):
lowerCamelCase__: str =0
lowerCamelCase__: Union[str, Any] =1
lowerCamelCase__: Dict =0
lowerCamelCase__: int =1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowerCamelCase__: List[Any] =pt_model(**UpperCAmelCase_).to_tuple()
lowerCamelCase__: Optional[int] =fx_model(**UpperCAmelCase_).to_tuple()
self.assertEqual(len(UpperCAmelCase_) , len(UpperCAmelCase_) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCAmelCase_)
lowerCamelCase__: str =pt_model_class.from_pretrained(UpperCAmelCase_ , from_flax=UpperCAmelCase_)
with torch.no_grad():
lowerCamelCase__: Optional[int] =pt_model_loaded(**UpperCAmelCase_).to_tuple()
self.assertEqual(
len(UpperCAmelCase_) , len(UpperCAmelCase_) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
@tooslow
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCamelCase__: List[Any] =model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
lowerCamelCase__: List[str] =model(np.ones((1, 1)))
self.assertIsNotNone(UpperCAmelCase_)
| 59 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_unet( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return model
@property
    def dummy_unet_condition( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , )
return model
@property
    def dummy_vqvae_and_unet( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , )
lowerCamelCase_ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return vqvae, unet
@slow
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowerCamelCase_ = DDPMScheduler()
lowerCamelCase_ = AudioDiffusionPipeline(vqvae=SCREAMING_SNAKE_CASE_ , unet=self.dummy_unet , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , steps=4 )
lowerCamelCase_ = output.audios[0]
lowerCamelCase_ = output.images[0]
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , steps=4 , return_dict=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
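        # Added note (illustrative): the byte-for-byte comparison against a stored
        # slice only holds because enable_full_determinism() and the fixed generator
        # seed above pin every source of randomness in the pipeline.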
lowerCamelCase_ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowerCamelCase_ = DDIMScheduler()
lowerCamelCase_ = self.dummy_vqvae_and_unet
lowerCamelCase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
np.random.seed(0 )
lowerCamelCase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(raw_audio=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , start_step=5 , steps=10 )
lowerCamelCase_ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowerCamelCase_ = self.dummy_unet_condition
lowerCamelCase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=SCREAMING_SNAKE_CASE_ , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
np.random.seed(0 )
lowerCamelCase_ = torch.rand((1, 1, 10) )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , encoding=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output.images[0]
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = torch_device
lowerCamelCase_ = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output.audios[0]
lowerCamelCase_ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 42 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class PixaStructTextConfig ( PretrainedConfig ):
    model_type = '''pix2struct_text_model'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''hidden_size''',
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__(self , vocab_size=5_0244 , hidden_size=768 , d_kv=64 , d_ff=2048 , num_layers=12 , num_heads=12 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , dense_act_fn="gelu_new" , decoder_start_token_id=0 , use_cache=False , pad_token_id=0 , eos_token_id=1 , tie_word_embeddings=False , is_decoder=True , **kwargs , ) -> Tuple:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , tie_word_embeddings=tie_word_embeddings , is_decoder=is_decoder , **kwargs , )
    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('''model_type''' ) == "pix2struct":
            config_dict = config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
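    # Added note (illustrative): when the checkpoint stores a composite `pix2struct`
    # config, from_pretrained above extracts the nested `text_config` dict so this
    # sub-config class can be instantiated standalone from the same file.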
class PixaStructVisionConfig ( PretrainedConfig ):
    model_type = '''pix2struct_vision_model'''
    def __init__(self , hidden_size=768 , patch_embed_hidden_size=768 , d_ff=2048 , d_kv=64 , num_hidden_layers=12 , num_attention_heads=12 , dense_act_fn="gelu_new" , layer_norm_eps=1e-6 , dropout_rate=0.0 , attention_dropout=0.0 , initializer_range=1e-10 , initializer_factor=1.0 , seq_len=4096 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , **kwargs , ) -> Dict:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('''model_type''' ) == "pix2struct":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class PixaStructConfig ( PretrainedConfig ):
    model_type = '''pix2struct'''
    is_composition = True
    def __init__(self , text_config=None , vision_config=None , initializer_factor=1.0 , initializer_range=0.02 , is_vqa=False , tie_word_embeddings=False , is_encoder_decoder=True , **kwargs , ) -> str:
        '''simple docstring'''
        super().__init__(tie_word_embeddings=tie_word_embeddings , is_encoder_decoder=is_encoder_decoder , **kwargs )
        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' )
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''' )
        self.text_config = PixaStructTextConfig(**text_config )
        self.vision_config = PixaStructVisionConfig(**vision_config )
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
@classmethod
    def from_text_vision_configs(cls , text_config , vision_config , **kwargs ) -> Optional[Any]:
        '''simple docstring'''
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict(self ) -> Union[str, Any]:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 60 |
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator( length = 8 ) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars ) for _ in range(length ) )
def alternative_password_generator( chars_incl ,i ) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters ,quotient + remainder )
        + random(digits ,quotient )
        + random(punctuation ,quotient )
    )
    list_of_chars = list(chars )
    shuffle(list_of_chars )
    return "".join(list_of_chars )
# random is a generalised function for letters, characters and numbers
def random( chars_incl ,quantity ) -> str:
    return "".join(secrets.choice(chars_incl ) for _ in range(quantity ) )
def random_number( chars_incl ,quantity ) -> Optional[int]:
    pass # Put your code here...
def random_letters( chars_incl ,quantity ) -> Tuple:
    pass # Put your code here...
def random_characters( chars_incl ,quantity ) -> str:
    pass # Put your code here...
def is_strong_password( password ,min_length = 8 ) -> bool:
    if len(password ) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char
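# Added example (illustrative): is_strong_password("Th1s!sOk") -> True (it mixes
# upper- and lowercase, a digit, and punctuation at the minimum length), while
# is_strong_password("password") -> False (no uppercase, digit, or special char).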
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def main() -> None:
    length = int(input('Please indicate the max length of your password: ' ).strip() )
    chars_incl = input(
        'Please indicate the characters that must be in your password: ' ).strip()
    print('Password generated:' ,password_generator(length ) )
    print(
        'Alternative Password generated:' ,alternative_password_generator(chars_incl ,length ) ,)
    print('[If you are thinking of using this password, You better save it.]' )
if __name__ == "__main__":
main()
| 42 | 0 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCamelCase = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class __lowerCamelCase ( PretrainedConfig ):
"""simple docstring"""
snake_case__ = "ernie_m"
snake_case__ = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__( self , vocab_size = 250_002 , hidden_size = 768 , num_hidden_layers = 12 , num_attention_heads = 12 , intermediate_size = 3_072 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 514 , initializer_range = 0.02 , pad_token_id = 1 , layer_norm_eps = 1e-0_5 , classifier_dropout=None , is_decoder=False , act_dropout=0.0 , **kwargs , ) -> int:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 61 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ) -> Dict:
        '''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
@require_cpu
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
debug_launcher(self.test_metrics.main )
@require_single_gpu
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
self.test_metrics.main()
@require_multi_gpu
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
print(f'''Found {torch.cuda.device_count()} devices.''' )
lowerCamelCase_ = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
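        # Added note (illustrative): the multi-GPU path shells out to `torchrun` with
        # one process per visible CUDA device; OMP_NUM_THREADS=1 avoids CPU
        # oversubscription across the spawned workers.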
| 42 | 0 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig ( BertConfig ):
    '''simple docstring'''
    model_type = '''new-model'''
if is_tf_available():
    class TFNewModel ( TFBertModel ):
        '''simple docstring'''
        config_class = NewModelConfig
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : Dict = "bert-base-cased"
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = TFAutoModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
@slow
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : Dict = "bert-base-cased"
SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelForPreTraining.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
@slow
def _A ( self : List[Any] ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase_ , output_loading_info=UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
@slow
def _A ( self : Any ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
@slow
def _A ( self : Union[str, Any] ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase_ , output_loading_info=UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
@slow
def _A ( self : Tuple ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Tuple = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase_ , output_loading_info=UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
@slow
def _A ( self : List[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = TFAutoModelForSequenceClassification.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
@slow
def _A ( self : Union[str, Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = TFAutoModelForQuestionAnswering.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
@slow
@require_tensorflow_probability
def _A ( self : Optional[int] ):
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = TFAutoModelForTableQuestionAnswering.from_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(
UpperCAmelCase_ , output_loading_info=UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : List[str] = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase_ ) , 1_4410 )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase_ ) , 1_4410 )
def _A ( self : Optional[Any] ):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
SCREAMING_SNAKE_CASE : List[str] = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = copy.deepcopy(model.config )
SCREAMING_SNAKE_CASE : str = ["FunnelBaseModel"]
SCREAMING_SNAKE_CASE : List[Any] = TFAutoModel.from_config(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = TFAutoModel.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def _A ( self : Optional[int] ):
try:
AutoConfig.register("new-model" , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(UpperCAmelCase_ ):
auto_class.register(UpperCAmelCase_ , UpperCAmelCase_ )
auto_class.register(UpperCAmelCase_ , UpperCAmelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase_ ):
auto_class.register(UpperCAmelCase_ , UpperCAmelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE : Optional[int] = BertModelTester(self ).get_config()
SCREAMING_SNAKE_CASE : Any = NewModelConfig(**tiny_config.to_dict() )
SCREAMING_SNAKE_CASE : Dict = auto_class.from_config(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = auto_class.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def _A ( self : Any ):
with self.assertRaisesRegex(
UpperCAmelCase_ , "bert-base is not a local folder and is not a valid model identifier" ):
SCREAMING_SNAKE_CASE : Dict = TFAutoModel.from_pretrained("bert-base" )
def _A ( self : Optional[int] ):
with self.assertRaisesRegex(
UpperCAmelCase_ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
SCREAMING_SNAKE_CASE : int = TFAutoModel.from_pretrained(UpperCAmelCase_ , revision="aaaaaa" )
def _A ( self : str ):
with self.assertRaisesRegex(
UpperCAmelCase_ , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def _A ( self : Dict ):
with self.assertRaisesRegex(UpperCAmelCase_ , "Use `from_pt=True` to load this model" ):
SCREAMING_SNAKE_CASE : str = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def _A ( self : Optional[int] ):
# Make sure we have cached the model.
SCREAMING_SNAKE_CASE : str = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE : Dict = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
SCREAMING_SNAKE_CASE : Tuple = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE : Optional[Any] = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
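        # Added note (illustrative): a warm cache should cost exactly one HEAD request
        # (the etag freshness check) and zero GETs, for both regular and sharded
        # checkpoints.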
| 62 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet( hor ) -> None:
    if hor == 1_28:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 1_28, 2_56)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D')
    elif hor == 32:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 64, 1_28, 2_56)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    model = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
    state_dict = model.state_dict()
    config = {
        'down_block_types': down_block_types,
        'block_out_channels': block_out_channels,
        'up_block_types': up_block_types,
        'layers_per_block': 1,
        'use_timestep_embedding': True,
        'out_block_type': 'OutConv1DBlock',
        'norm_num_groups': 8,
        'downsample_each_block': False,
        'in_channels': 14,
        'out_channels': 14,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'sample_size': 6_55_36,
        'mid_block_type': 'MidResTemporalBlock1D',
        'act_fn': 'mish',
    }
    hf_value_function = UNetaDModel(**config )
    print(f'''length of state dict: {len(state_dict.keys() )}''' )
    print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(model.state_dict().keys() ,hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() ,f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
    with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' ,'w' ) as f:
        json.dump(config ,f )
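# Added note (illustrative): the zip-based `mapping` above pairs parameters purely
# by enumeration order, so it only works because both checkpoints list their
# tensors in the same sequence; no name matching is performed.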
def value_function() -> None:
    config = {
        'in_channels': 14,
        'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
        'up_block_types': (),
        'out_block_type': 'ValueFunction',
        'mid_block_type': 'ValueFunctionMidBlock1D',
        'block_out_channels': (32, 64, 1_28, 2_56),
        'layers_per_block': 1,
        'downsample_each_block': True,
        'sample_size': 6_55_36,
        'out_channels': 14,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'use_timestep_embedding': True,
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'norm_num_groups': 8,
        'act_fn': 'mish',
    }
    model = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
    state_dict = model
    hf_value_function = UNetaDModel(**config )
    print(f'''length of state dict: {len(state_dict.keys() )}''' )
    print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(state_dict.keys() ,hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() ,'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
    with open('hub/hopper-medium-v2/value_function/config.json' ,'w' ) as f:
        json.dump(config ,f )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 42 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
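# Added example (illustrative): floats_list((2, 3)) -> a 2x3 nested list of floats in
# [0, 1) drawn from the module-level `global_rng`, so test inputs become
# reproducible once that RNG is seeded.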
class WavaVecaFeatureExtractionTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=16000 , return_attention_mask=True , do_normalize=True , ) -> Dict:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict( self ) -> Dict:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ) -> int:
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors='np').input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ['longest', 'max_length', 'do_not_pad']
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors='np')
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ['longest', 'max_length', 'do_not_pad']
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding='max_length', return_tensors='np')
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding='longest', return_tensors='np')
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding='longest', return_tensors='np')
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
@require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_values': inputs}], return_tensors='np')
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt')
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
@slow
@require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == 'layer')
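# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only; not part of the test suite above):
# how the feature extractor under test is typically applied to raw audio. The
# constructor arguments simply mirror the tester defaults.
def example_wav2vec2_feature_extraction():
    from transformers import Wav2Vec2FeatureExtractor

    extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    one_second_of_noise = np.random.randn(16000).tolist()
    inputs = extractor(one_second_of_noise, sampling_rate=16000, padding='longest', return_tensors='np')
    # with do_normalize=True each utterance is zero-mean / unit-variance,
    # which is exactly what _check_zero_mean_unit_variance asserts above
    return inputs.input_values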
| 63 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
                'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = 'facebook/mbart-large-en-ro'
    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='en_XX', tgt_lang='ro_RO')
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250020 )
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ['this is gunna be a long sentence ' * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR']), [250026, 250001])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors='pt')
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True,
            max_length=len(self.expected_src_tokens), return_tensors='pt')
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors='pt')
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors='pt')
        labels = targets['input_ids']
        batch['decoder_input_ids'] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            'A test', return_tensors='pt', src_lang='en_XX', tgt_lang='ar_AR')

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                'input_ids': [[62, 3034, 2, 250004]],
                'attention_mask': [[1, 1, 1, 1]],
                # ar_AR
                'forced_bos_token_id': 250001,
            },
        )
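# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only; not part of the test file): typical
# en_XX -> ro_RO preprocessing with the tokenizer exercised above. Requires
# network access to download the checkpoint.
def example_mbart_translation_batch():
    tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-en-ro', src_lang='en_XX', tgt_lang='ro_RO')
    batch = tokenizer(
        ['UN Chief Says There Is No Military Solution in Syria'],
        text_target=['Şeful ONU declară că nu există o soluţie militară în Siria'],
        padding=True,
        return_tensors='pt',
    )
    # input_ids end with [eos, en_XX]; labels end with [eos, ro_RO]
    return batch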
| 42 | 0 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class StableDiffusionControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='scaled_linear',
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        components = {
            'unet': unet,
            'controlnet': controlnet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'image': image,
            'control_image': control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(),
        reason='XFormers attention is only available with CUDA and `xformers` installed',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionControlNetImg2ImgMultiPipelineFastTests(
    PipelineTesterMixin, PipelineLatentTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='scaled_linear',
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            'unet': unet,
            'controlnet': controlnet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'image': image,
            'control_image': control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(),
        reason='XFormers attention is only available with CUDA and `xformers` installed',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny')

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', safety_checker=None, controlnet=controlnet)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device='cpu').manual_seed(0)
        prompt = 'evil space-punk bird'
        control_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png').resize((512, 512))
        image = load_image(
            'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png').resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type='np',
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy')
        assert np.abs(expected_image - image).max() < 9e-2
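# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only; not part of the tests): building a
# Canny-edge control image from an input photo, as expected by the canny
# ControlNet used in the slow test above. Requires opencv-python.
def make_canny_control_image(pil_image, low_threshold=100, high_threshold=200):
    import cv2

    edges = cv2.Canny(np.array(pil_image.convert('RGB')), low_threshold, high_threshold)
    # replicate the single edge channel to RGB, since ControlNet expects a 3-channel image
    return Image.fromarray(np.stack([edges] * 3, axis=-1))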
| 64 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
@slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token='(BOS)',
            eos_token='(EOS)',
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            'semantic_prompt': np.ones(seq_len),
            'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len)),
            'fine_prompt': np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, 'file.npz')
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding='max_length',
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
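# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only; not part of the tests): pairing the
# processor with BarkModel for text-to-audio generation. The checkpoint name
# and voice preset mirror the test constants; BarkModel is assumed to be
# available in the installed transformers version.
def example_bark_generation():
    from transformers import BarkModel

    processor = BarkProcessor.from_pretrained('ylacombe/bark-small')
    model = BarkModel.from_pretrained('ylacombe/bark-small')
    inputs = processor(text='This is a test string', voice_preset='en_speaker_1')
    audio = model.generate(**inputs)
    return audio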
| 42 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_thumbnail'))
        self.assertTrue(hasattr(image_processing, 'do_align_long_axis'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {'height': 84, 'width': 42})
    def test_batch_feature(self):
        pass
@is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
@is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
@is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
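# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only; not part of the tests): running the
# image processor on a single PIL image, mirroring the configuration the
# tester builds above.
def example_donut_image_processing():
    image_processor = DonutImageProcessor(size={'height': 18, 'width': 20})
    image = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))
    pixel_values = image_processor(image, return_tensors='pt').pixel_values
    # (batch, channels, height, width) == (1, 3, 18, 20)
    return pixel_values.shape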
| 65 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    # Returns the bijective byte -> printable-unicode mapping used by byte-level BPE.
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
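# Hedged illustration (added; not part of the original module): the mapping is
# a bijection over all 256 byte values, so any byte string can be rendered with
# printable characters and inverted exactly.
def _demo_bytes_to_unicode():
    byte_encoder = bytes_to_unicode()
    byte_decoder = {v: k for k, v in byte_encoder.items()}
    assert len(byte_encoder) == 256
    encoded = ''.join(byte_encoder[b] for b in 'café'.encode('utf-8'))
    decoded = bytearray(byte_decoder[c] for c in encoded).decode('utf-8')
    assert decoded == 'café'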
def get_pairs(word):
    # Return the set of adjacent symbol pairs in a word (a tuple of symbols).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
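# Hedged illustration (added; not part of the original module): get_pairs feeds
# the BPE merge loop below; for ('h', 'e', 'l', 'l', 'o') it yields
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}.
def _demo_get_pairs():
    return get_pairs(('h', 'e', 'l', 'l', 'o'))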
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors='replace',
        bos_token='<s>',
        eos_token='</s>',
        sep_token='</s>',
        cls_token='<s>',
        unk_token='<unk>',
        pad_token='<pad>',
        mask_token='<mask>',
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the lowest-ranked (earliest-learned) pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
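# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only; not part of the original module):
# loading the byte-level BPE tokenizer defined above from the hub and encoding
# a sequence pair. Requires network access.
def _example_longformer_tokenizer():
    tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096')
    encoded = tokenizer('Hello world', 'How are you?')
    # <s> A </s></s> B </s>, per build_inputs_with_special_tokens above
    return encoded['input_ids']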
| 42 | 0 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
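# ---------------------------------------------------------------------------
# Hedged sketch (illustration only; not part of the test file): the
# replicate/shard pattern used throughout the tests below -- parameters are
# copied to every device and inputs are split along the batch axis so the
# jitted pipeline runs data-parallel via jax.pmap.
def _sketch_data_parallel_call(pipeline, params, prompt, num_inference_steps=50, seed=0):
    num_samples = jax.device_count()
    prompt_ids = pipeline.prepare_inputs(num_samples * [prompt])

    params = replicate(params)  # one copy of the weights per device
    prng_seed = jax.random.split(jax.random.PRNGKey(seed), num_samples)
    prompt_ids = shard(prompt_ids)  # split the batch across devices

    return pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images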
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None, cache_dir=tmpdirname)

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], 'snapshots'))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith('.bin') for f in files)
@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None)

        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples
    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=None)

        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2383808.2) < 5e-1
def __a ( self ):
_lowercase , _lowercase : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowerCAmelCase )
_lowercase : Union[str, Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_lowercase : Any = jax.random.PRNGKey(0 )
_lowercase : Optional[int] = 5_0
_lowercase : Optional[Any] = jax.device_count()
_lowercase : Union[str, Any] = num_samples * [prompt]
_lowercase : Any = pipeline.prepare_inputs(_lowerCAmelCase )
# shard inputs and rng
_lowercase : Optional[Any] = replicate(_lowerCAmelCase )
_lowercase : Any = jax.random.split(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Dict = shard(_lowerCAmelCase )
_lowercase : Union[str, Any] = pipeline(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , jit=_lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(_lowerCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def __a ( self ):
_lowercase , _lowercase : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
_lowercase : List[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_lowercase : Union[str, Any] = jax.random.PRNGKey(0 )
_lowercase : Optional[int] = 5_0
_lowercase : List[str] = jax.device_count()
_lowercase : List[Any] = num_samples * [prompt]
_lowercase : List[str] = pipeline.prepare_inputs(_lowerCAmelCase )
# shard inputs and rng
_lowercase : Union[str, Any] = replicate(_lowerCAmelCase )
_lowercase : List[str] = jax.random.split(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Union[str, Any] = shard(_lowerCAmelCase )
_lowercase : Dict = pipeline(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , jit=_lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(_lowerCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def __a ( self ):
_lowercase : str = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , set_alpha_to_one=_lowerCAmelCase , steps_offset=1 , )
_lowercase , _lowercase : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
_lowercase : Any = scheduler.create_state()
_lowercase : List[str] = scheduler_state
_lowercase : Dict = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_lowercase : List[str] = jax.random.PRNGKey(0 )
_lowercase : int = 5_0
_lowercase : List[str] = jax.device_count()
_lowercase : Any = num_samples * [prompt]
_lowercase : int = pipeline.prepare_inputs(_lowerCAmelCase )
# shard inputs and rng
_lowercase : str = replicate(_lowerCAmelCase )
_lowercase : str = jax.random.split(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = shard(_lowerCAmelCase )
_lowercase : Dict = pipeline(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , jit=_lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3
assert np.abs((np.abs(_lowerCAmelCase , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
def __a ( self ):
_lowercase : List[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_lowercase : Optional[Any] = jax.device_count()
_lowercase : Union[str, Any] = num_samples * [prompt]
_lowercase : Optional[Any] = jax.random.split(jax.random.PRNGKey(0 ) , _lowerCAmelCase )
_lowercase , _lowercase : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowerCAmelCase , )
_lowercase : Optional[Any] = replicate(_lowerCAmelCase )
_lowercase : Union[str, Any] = pipeline.prepare_inputs(_lowerCAmelCase )
_lowercase : Optional[Any] = shard(_lowerCAmelCase )
_lowercase : Tuple = pipeline(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , jit=_lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
_lowercase : Any = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
_lowercase , _lowercase : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowerCAmelCase , use_memory_efficient_attention=_lowerCAmelCase , )
_lowercase : Optional[Any] = replicate(_lowerCAmelCase )
_lowercase : str = pipeline.prepare_inputs(_lowerCAmelCase )
_lowercase : int = shard(_lowerCAmelCase )
_lowercase : List[Any] = pipeline(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , jit=_lowerCAmelCase ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
        _lowercase : Dict = images_eff[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
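# Illustrative sketch (assumes flax is installed; works for any device count)
# of the replicate/shard data-parallel pattern used throughout the tests above,
# with a toy computation standing in for the diffusion pipeline.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

toy_params = {"scale": jnp.array(2.0)}
toy_batch = jnp.arange(jax.device_count() * 4, dtype=jnp.float32).reshape(-1, 4)
replicated_params = replicate(toy_params)  # copy the params onto every device
sharded_batch = shard(toy_batch)  # split the leading batch axis across devices

@jax.pmap
def apply(params, x):
    return params["scale"] * x

out = apply(replicated_params, sharded_batch)
assert out.shape == (jax.device_count(), 1, 4)  # leading device axis, as in the tests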
| 66 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
A_ = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
A_ = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
A_ = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = RealmTokenizer
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="[UNK]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[PAD]" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> List[Any]:
'''simple docstring'''
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE_ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE_ ) != tokenize_chinese_chars
):
lowerCamelCase_ = getattr(SCREAMING_SNAKE_CASE_ , normalizer_state.pop('type' ) )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = strip_accents
lowerCamelCase_ = tokenize_chinese_chars
lowerCamelCase_ = normalizer_class(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = do_lower_case
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = PaddingStrategy.MAX_LENGTH
lowerCamelCase_ = text
lowerCamelCase_ = kwargs.pop('text_pair' , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = kwargs.pop('return_tensors' , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(SCREAMING_SNAKE_CASE_ ):
if batch_text_pair is not None:
lowerCamelCase_ = batch_text_pair[idx]
else:
lowerCamelCase_ = None
lowerCamelCase_ = super().__call__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = encoded_candidates.get('input_ids' )
lowerCamelCase_ = encoded_candidates.get('attention_mask' )
lowerCamelCase_ = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(SCREAMING_SNAKE_CASE_ )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(SCREAMING_SNAKE_CASE_ )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {key: item for key, item in output_data.items() if len(SCREAMING_SNAKE_CASE_ ) != 0}
return BatchEncoding(SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
lowerCamelCase_ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
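# Illustrative sketch (toy tokenizer, made-up data) of the candidate-batching
# loop in __call__ above: each inner list of candidate texts is encoded and
# padded to a fixed length, yielding a (batch, num_candidates, seq_len) nest.
def batch_encode_candidates(tokenize, batch_of_candidates, pad_id=0, max_length=8):
    output = []
    for candidates in batch_of_candidates:
        ids = [tokenize(text)[:max_length] for text in candidates]
        output.append([seq + [pad_id] * (max_length - len(seq)) for seq in ids])
    return output

toy_tokenize = lambda text: [ord(ch) % 100 for ch in text]
encoded = batch_encode_candidates(toy_tokenize, [["first text", "second"], ["third", "one more"]])
assert len(encoded) == 2 and all(len(seq) == 8 for row in encoded for seq in row)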
| 42 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any ) -> str:
_lowercase = DPTConfig(embedding_type='hybrid' )
if "large" in checkpoint_url:
_lowercase = 1024
_lowercase = 4096
_lowercase = 24
_lowercase = 16
_lowercase = [5, 11, 17, 23]
_lowercase = [256, 512, 1024, 1024]
_lowercase = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
_lowercase = 768
_lowercase = [1, 1, 1, 0.5]
_lowercase = [256, 512, 768, 768]
_lowercase = 150
_lowercase = 16
_lowercase = (1, 384, 384)
_lowercase = False
_lowercase = 'project'
if "ade" in checkpoint_url:
_lowercase = True
_lowercase = 768
_lowercase = [1, 1, 1, 0.5]
_lowercase = 150
_lowercase = 16
_lowercase = 'huggingface/label-files'
_lowercase = 'ade20k-id2label.json'
_lowercase = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type='dataset' ) ) , 'r' ) )
_lowercase = {int(snake_case__ ): v for k, v in idalabel.items()}
_lowercase = idalabel
_lowercase = {v: k for k, v in idalabel.items()}
_lowercase = [1, 150, 480, 480]
return config, expected_shape
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> str:
_lowercase = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[Any] ) -> Any:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_lowercase = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
_lowercase = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
_lowercase = name.replace('patch_embed' , '' )
if "pos_embed" in name:
_lowercase = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
_lowercase = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
_lowercase = name.replace('proj' , 'projection' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
_lowercase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
_lowercase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
_lowercase = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
_lowercase = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
_lowercase = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
_lowercase = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
_lowercase = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
_lowercase = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
_lowercase = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
_lowercase = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_lowercase = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
_lowercase = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
_lowercase = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
_lowercase = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
_lowercase = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
_lowercase = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_lowercase = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
_lowercase = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
_lowercase = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
_lowercase = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
_lowercase = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
_lowercase = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
_lowercase = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
_lowercase = name.replace('pretrained' , 'dpt' )
if "bn" in name:
_lowercase = name.replace('bn' , 'batch_norm' )
if "head" in name:
_lowercase = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
_lowercase = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
_lowercase = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
_lowercase = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
_lowercase = name.replace('..' , '.' )
if "stem.conv" in name:
_lowercase = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
_lowercase = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
_lowercase = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
_lowercase = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
_lowercase = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
_lowercase = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[str] , snake_case__ :int ) -> Dict:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
_lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowercase = in_proj_weight[: config.hidden_size, :]
_lowercase = in_proj_bias[: config.hidden_size]
_lowercase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowercase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowercase = in_proj_weight[
-config.hidden_size :, :
]
_lowercase = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
_lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :List[Any] , snake_case__ :str , snake_case__ :Any , snake_case__ :List[str] ) -> str:
_lowercase , _lowercase = get_dpt_config(snake_case__ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
_lowercase = torch.load(snake_case__ , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(snake_case__ )
# rename keys
for key in state_dict.copy().keys():
_lowercase = state_dict.pop(snake_case__ )
_lowercase = val
# read in qkv matrices
read_in_q_k_v(snake_case__ , snake_case__ )
# load HuggingFace model
_lowercase = DPTForSemanticSegmentation(snake_case__ ) if 'ade' in checkpoint_url else DPTForDepthEstimation(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# Check outputs on an image
_lowercase = 480 if 'ade' in checkpoint_url else 384
_lowercase = DPTImageProcessor(size=snake_case__ )
_lowercase = prepare_img()
_lowercase = image_processor(snake_case__ , return_tensors='pt' )
# forward pass
_lowercase = model(**snake_case__ ).logits if 'ade' in checkpoint_url else model(**snake_case__ ).predicted_depth
if show_prediction:
_lowercase = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=snake_case__ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
snake_case = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
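# Illustrative, self-contained demo (toy sizes, deterministic data) of the
# fused qkv split performed by read_in_q_k_v above: one (3*hidden, hidden)
# matrix is cut into equal query/key/value blocks.
import torch

hidden_size = 4
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
    3 * hidden_size, hidden_size
)
query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : 2 * hidden_size, :]
value = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([query, key, value], dim=0), in_proj_weight)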
| 67 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
lowerCamelCase_ = str(__UpperCamelCase )
return len(__UpperCamelCase ) == 9 and set(__UpperCamelCase ) == set('123456789' )
def _UpperCamelCase ( ) -> int | None:
for base_num in range(99_99 ,49_99 ,-1 ):
lowerCamelCase_ = 10_00_02 * base_num
if is_9_pandigital(__UpperCamelCase ):
return candidate
for base_num in range(3_33 ,99 ,-1 ):
lowerCamelCase_ = 1_00_20_03 * base_num
if is_9_pandigital(__UpperCamelCase ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''')
| 42 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A = logging.get_logger(__name__)
__A = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class _A ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Tuple = 'bit'
lowerCamelCase : List[Any] = ['preactivation', 'bottleneck']
lowerCamelCase : List[Any] = ['SAME', 'VALID']
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : List[str]=64 , __SCREAMING_SNAKE_CASE : Tuple=[256, 512, 1024, 2048] , __SCREAMING_SNAKE_CASE : Optional[Any]=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE : Optional[Any]="preactivation" , __SCREAMING_SNAKE_CASE : int="relu" , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Tuple=32 , __SCREAMING_SNAKE_CASE : Tuple=0.0 , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=32 , __SCREAMING_SNAKE_CASE : List[str]=1 , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : Tuple , ) -> List[str]:
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
__UpperCAmelCase =global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''' )
__UpperCAmelCase =num_channels
__UpperCAmelCase =embedding_size
__UpperCAmelCase =hidden_sizes
__UpperCAmelCase =depths
__UpperCAmelCase =layer_type
__UpperCAmelCase =hidden_act
__UpperCAmelCase =global_padding
__UpperCAmelCase =num_groups
__UpperCAmelCase =drop_path_rate
__UpperCAmelCase =embedding_dynamic_padding
__UpperCAmelCase =output_stride
__UpperCAmelCase =width_factor
__UpperCAmelCase =["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__UpperCAmelCase , __UpperCAmelCase =get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
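# Illustrative sketch (constants copied from the class attributes above, logic
# detached from the class so it runs standalone) of the layer_type and
# global_padding validation the config performs.
SUPPORTED_LAYER_TYPES = ["preactivation", "bottleneck"]
SUPPORTED_PADDING = ["SAME", "VALID"]

def validate(layer_type, global_padding=None):
    if layer_type not in SUPPORTED_LAYER_TYPES:
        raise ValueError(f"layer_type={layer_type} is not one of {','.join(SUPPORTED_LAYER_TYPES)}")
    if global_padding is not None:
        if global_padding.upper() in SUPPORTED_PADDING:
            global_padding = global_padding.upper()
        else:
            raise ValueError(f"Padding strategy {global_padding} not supported")
    return layer_type, global_padding

assert validate("preactivation", "same") == ("preactivation", "SAME")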
| 68 |
'''simple docstring'''
A_ = "Input must be a string of 8 numbers plus letter"
A_ = "TRWAGMYFPDXBNJZSQVHLCKE"
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowerCamelCase_ = f'''Expected string as input, found {type(__UpperCamelCase ).__name__}'''
raise TypeError(__UpperCamelCase )
lowerCamelCase_ = spanish_id.replace('-' ,'' ).upper()
if len(__UpperCamelCase ) != 9:
raise ValueError(__UpperCamelCase )
try:
lowerCamelCase_ = int(spanish_id_clean[0:8] )
lowerCamelCase_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__UpperCamelCase ) from ex
if letter.isdigit():
raise ValueError(__UpperCamelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
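# Worked example for the checksum rule above: the control letter is the lookup
# table indexed by the 8-digit number modulo 23. The id below is made up.
LOOKUP = "TRWAGMYFPDXBNJZSQVHLCKE"

def control_letter(number: int) -> str:
    return LOOKUP[number % 23]

assert control_letter(12345678) == "Z"  # so "12345678Z" would validate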
| 42 | 0 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
a : int = importlib.util.find_spec('''s3fs''') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
a : List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> str:
if "://" in dataset_path:
__snake_case = dataset_path.split("://" )[1]
return dataset_path
def __UpperCAmelCase ( _UpperCAmelCase : fsspec.AbstractFileSystem ) -> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def __UpperCAmelCase ( _UpperCAmelCase : fsspec.AbstractFileSystem , _UpperCAmelCase : str , _UpperCAmelCase : str ) -> List[str]:
__snake_case = not is_remote_filesystem(_UpperCAmelCase )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(_UpperCAmelCase ) , fs._strip_protocol(_UpperCAmelCase ) )
else:
fs.mv(_UpperCAmelCase , _UpperCAmelCase , recursive=_UpperCAmelCase )
def __UpperCAmelCase ( ) -> None:
if hasattr(fsspec.asyn , "reset_lock" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
__snake_case = None
__snake_case = None
__snake_case = threading.Lock()
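# Illustrative usage sketch (made-up paths) of the URI helper defined above:
# the protocol prefix is stripped from remote paths and local paths pass through.
def strip_protocol(dataset_path: str) -> str:
    if "://" in dataset_path:
        return dataset_path.split("://")[1]
    return dataset_path

assert strip_protocol("s3://bucket/dataset") == "bucket/dataset"
assert strip_protocol("/local/dataset") == "/local/dataset"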
| 69 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = GPTSanJapaneseTokenizer
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = {'do_clean_text': False, 'add_prefix_space': False}
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# fmt: off
lowerCamelCase_ = ['ใใ', 'ใใใซ', 'ใซใกใฏ', 'ใฐใใฏ', 'ไธ็,ใบ็', 'ใ', 'ใ', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
lowerCamelCase_ = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # ๐
lowerCamelCase_ = {'unk_token': '<unk>'}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(SCREAMING_SNAKE_CASE_ ) )
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = 'ใใใซใกใฏใไธ็ใ \nใใใฐใใฏใใบ็ใ๐'
lowerCamelCase_ = 'ใใใซใกใฏใไธ็ใ \nใใใฐใใฏใไธ็ใ๐'
return input_text, output_text
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.get_input_output_texts(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
return text, ids
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
# Testing tokenization
lowerCamelCase_ = 'ใใใซใกใฏใไธ็ใใใใใฐใใฏใใบ็ใ'
lowerCamelCase_ = ['ใใ', 'ใซใกใฏ', 'ใ', 'ไธ็', 'ใ', '<SP>', 'ใใ', 'ใฐใใฏ', 'ใ', 'ใบ็', 'ใ']
lowerCamelCase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids without special tokens
lowerCamelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids with special tokens
lowerCamelCase_ = tokens + [tokenizer.unk_token]
lowerCamelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
# Testing tokenization
lowerCamelCase_ = 'ใใใซใกใฏใ<|bagoftoken|>ไธ็ใใใใฐใใฏใ<|bagoftoken|>ใบ็ใ'
lowerCamelCase_ = 'ใใใซใกใฏใใใใไธ็ใใใใฐใใฏใใใใไธ็ใ'
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
lowerCamelCase_ = 'ใใใซใกใฏใไธ็ใ'
lowerCamelCase_ = 'ใใใฐใใฏใใบ็ใ๐'
lowerCamelCase_ = 'ใใใซใกใฏใไธ็ใใใใฐใใฏใไธ็ใ๐'
lowerCamelCase_ = tokenizer.encode(prefix_text + input_text )
lowerCamelCase_ = tokenizer.encode('' , prefix_text=prefix_text + input_text )
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
lowerCamelCase_ = 'ใใใซใกใฏใไธ็ใ'
lowerCamelCase_ = 'ใใใฐใใฏใใบ็ใ๐'
lowerCamelCase_ = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2
lowerCamelCase_ = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2
lowerCamelCase_ = [1] + [0] * (len_prefix + len_text + 1)
lowerCamelCase_ = [1] * (len_prefix + len_text + 1) + [0]
lowerCamelCase_ = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCamelCase_ = tokenizer(prefix_text + input_text ).token_type_ids
lowerCamelCase_ = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ ).token_type_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
lowerCamelCase_ = tokenizer.encode('ใใณใใฏ' )
lowerCamelCase_ = tokenizer.encode('' , prefix_text='ใใณใใฏ' )
lowerCamelCase_ = tokenizer.encode('ใใฏ' , prefix_text='ใใณ' )
self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        lowerCamelCase_ = [['ๆญฆ็ฐไฟก็', 'ใฏใ'], ['็น็ฐไฟก้ท', 'ใฎ้
ไธใฎใ']]
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
# fmt: off
lowerCamelCase_ = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
lowerCamelCase_ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCamelCase_ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token.token_type_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token.attention_mask , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.input_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.token_type_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.attention_mask , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
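# Illustrative sketch (toy lengths, no real tokenizer) of the prefix-LM
# token_type_ids layout the tests above check: 1 marks prefix (bidirectional)
# positions, 0 marks positions generated left-to-right.
len_prefix, len_text = 3, 4
no_prefix = [1] + [0] * (len_prefix + len_text + 1)
all_prefix = [1] * (len_prefix + len_text + 1) + [0]
mixed = [1] + [1] * len_prefix + [0] * (len_text + 1)
assert len(no_prefix) == len(all_prefix) == len(mixed) == len_prefix + len_text + 2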
| 42 | 0 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = ProphetNetTokenizer
UpperCamelCase = False
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
super().setUp()
lowerCamelCase_ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : Union[str, Any] , A_ : str ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = 'UNwant\u00E9d,running'
lowerCamelCase_ = 'unwanted, running'
return input_text, output_text
def a__ ( self : Optional[int] ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(A_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [9, 6, 7, 12, 10, 11] )
def a__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHรคLLo!how \n Are yoU? ' ) , ['hรคllo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHรคLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHรคLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHรคLLo!how \n Are yoU? ' ) , ['HรคLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHรคLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowerCamelCase_ = {}
for i, token in enumerate(A_ ):
lowerCamelCase_ = i
lowerCamelCase_ = WordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
lowerCamelCase_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase_ = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
lowerCamelCase_ = tokenizer(A_ , padding=A_ , return_tensors='pt' )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = list(batch.input_ids.numpy()[0] )
self.assertListEqual(A_ , A_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def a__ ( self : int ) -> str:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def a__ ( self : int ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
lowerCamelCase_ = tokenizer.encode('sequence builders' , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer.encode('multi-sequence build' , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(A_ )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
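# Minimal re-implementation (toy vocab, illustrative only) of the greedy
# longest-match-first rule the WordpieceTokenizer tests above exercise; a word
# with any unmatchable remainder collapses to the unknown token.
def wordpiece(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]
        pieces.append(cur)
        start = end
    return pieces

toy_vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert wordpiece("unwanted", toy_vocab) == ["un", "##want", "##ed"]
assert wordpiece("unwantedX", toy_vocab) == ["[UNK]"]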
| 70 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
A_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Dict:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ = "auto" ) -> List[str]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> List[str]:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = 1
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(SCREAMING_SNAKE_CASE_ )}.''' )
# get prompt text embeddings
lowerCamelCase_ = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
lowerCamelCase_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowerCamelCase_ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowerCamelCase_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = text_embeddings.shape
lowerCamelCase_ = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 )
lowerCamelCase_ = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ = 42
if negative_prompt is None:
lowerCamelCase_ = ['']
elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !='''
f''' {type(SCREAMING_SNAKE_CASE_ )}.''' )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = [negative_prompt]
elif batch_size != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
' the batch size of `prompt`.' )
else:
lowerCamelCase_ = negative_prompt
lowerCamelCase_ = text_input_ids.shape[-1]
lowerCamelCase_ = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , )
lowerCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ = uncond_embeddings.shape[1]
lowerCamelCase_ = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
lowerCamelCase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowerCamelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCamelCase_ = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(self.device )
lowerCamelCase_ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(
self.device )
else:
lowerCamelCase_ = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowerCamelCase_ = latents_reference.to(self.device )
lowerCamelCase_ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowerCamelCase_ = (latents_shape[3] - latents_shape_reference[3]) // 2
lowerCamelCase_ = (latents_shape[2] - latents_shape_reference[2]) // 2
lowerCamelCase_ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowerCamelCase_ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowerCamelCase_ = 0 if dx < 0 else dx
lowerCamelCase_ = 0 if dy < 0 else dy
lowerCamelCase_ = max(-dx , 0 )
lowerCamelCase_ = max(-dy , 0 )
# import pdb
# pdb.set_trace()
lowerCamelCase_ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCamelCase_ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase_ = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_eta:
lowerCamelCase_ = eta
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
lowerCamelCase_ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCamelCase_ ,lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 1 / 0.18_215 * latents
lowerCamelCase_ = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
lowerCamelCase_ = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) , return_tensors='pt' ).to(
self.device )
lowerCamelCase_ ,lowerCamelCase_ = self.safety_checker(
images=SCREAMING_SNAKE_CASE_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowerCamelCase_ = None
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
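# Illustrative, self-contained demo (toy tensors) of the classifier-free
# guidance step inside the denoising loop above: the batched prediction is
# split into unconditional and text-conditioned halves and recombined.
import torch

guidance_scale = 7.5
noise_pred = torch.randn(2, 4, 8, 8)  # [uncond, text] stacked on the batch axis
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == (1, 4, 8, 8)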
| 42 | 0 |
'''simple docstring'''
def combination_util( arr , n , r , index , data , i ) -> Optional[int]:
    """simple docstring"""
    if index == r:
        for j in range(r ):
            print(data[j] , end=" " )
        print(" " )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr , n , r , index + 1 , data , i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr , n , r , index , data , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination( arr , n , r ) -> Any:
    """simple docstring"""
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr , n , r , 0 , data , 0 )
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
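    # Cross-check sketch (not part of the original snippet): the recursive printer
    # above emits the same r-subsets, in the same order, as itertools.combinations.
    from itertools import combinations

    for combo in combinations(arr, 3):
        print(*combo)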
| 71 |
'''simple docstring'''
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"
def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + '/today' ).json()
def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
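    # Hedged extra: zenquotes payloads are commonly a list of dicts; the 'q' (quote)
    # and 'a' (author) keys below are an assumption about the API response shape.
    for quote in response:
        print(f"{quote.get('q', '')} -- {quote.get('a', 'unknown')}")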
| 42 | 0 |
'''simple docstring'''
ENERGY_CONVERSION: dict[str, float] = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.6_02_17_66_34e-19,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.35_5818,
}
def energy_conversion( from_type : str , to_type : str , value : float ) -> float:
    '''simple docstring'''
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f'Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'
            f'Valid values are: {", ".join(ENERGY_CONVERSION )}'
        )
        raise ValueError(msg )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
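    # Worked example for the table above (illustrative check, not part of the
    # original module): 1 kWh is exactly 3.6e6 J, and the conversion round-trips.
    assert energy_conversion('kilowatthour', 'joule', 1) == 3_60_00_00
    assert abs(energy_conversion('joule', 'kilowatthour', 3_60_00_00) - 1.0) < 1e-12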
| 72 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=33 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> int:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ) -> str:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ) -> List[str]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[int]:
        '''simple docstring'''
        model = EsmModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Tuple:
        '''simple docstring'''
        model = EsmForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Any:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ) -> Any:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    test_mismatched_shapes = False
    all_model_classes = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = ()
    pipeline_model_mapping = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_sequence_classification_problem_types = True
    def setUp( self ) -> List[Any]:
        '''simple docstring'''
        self.model_tester = EsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
    def test_config( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_model( self ) -> Any:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ) -> str:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ) -> Dict:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification( self ) -> int:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ) -> Any:
        '''simple docstring'''
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_create_position_ids_respects_padding_index( self ) -> Any:
        '''simple docstring'''
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config )
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ] )
        position_ids = create_position_ids_from_input_ids(input_ids , model.padding_idx )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(position_ids , expected_positions ) ) )
    def test_create_position_ids_from_inputs_embeds( self ) -> List[Any]:
        '''simple docstring'''
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config )
        inputs_embeds = torch.empty(2 , 4 , 30 )
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions] )
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(position_ids , expected_positions ) ) )
@unittest.skip('Esm does not support embedding resizing' )
    def test_resize_embeddings_untied( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
    def test_resize_tokens_embeddings( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
@require_torch
class EsmModelIntegrationTest( TestCasePlus ):
'''simple docstring'''
@slow
    def test_inference_masked_lm( self ) -> Any:
        '''simple docstring'''
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
            output = model(input_ids )[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape , expected_shape )
            expected_slice = torch.tensor(
                [[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_inference_no_head( self ) -> Tuple:
        '''simple docstring'''
        with torch.no_grad():
            model = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
            output = model(input_ids )[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
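# Illustrative sketch of the padding-aware position-id computation exercised by the
# tests above: positions count up from padding_idx + 1 and pad tokens keep
# padding_idx. This is a re-derivation for clarity, not the library source.
if __name__ == "__main__":
    def sketch_position_ids(input_ids , padding_idx ):
        mask = input_ids.ne(padding_idx ).int()
        return (torch.cumsum(mask , dim=1 ) * mask).long() + padding_idx

    print(sketch_position_ids(torch.as_tensor([[12, 31, 13, 1]] ) , 1 ))  # tensor([[2, 3, 4, 1]])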
| 42 | 0 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'''add_prefix_space''': True}
    test_seq2seq = False
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file , 'w' , encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer( self , **kwargs) -> Any:
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname , **kwargs)
    def get_rust_tokenizer( self , **kwargs) -> Optional[Any]:
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **kwargs)
    def get_input_output_texts( self , tokenizer) -> Tuple:
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer( self) -> int:
        tokenizer = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens) , input_bpe_tokens)
    def test_rust_and_python_full_tokenizers( self) -> Tuple:
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = 'lower newer'
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence , add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens , rust_tokens)
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False)
        self.assertListEqual(ids , rust_ids)
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence , add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids , rust_ids)
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens) , input_bpe_tokens)
    def test_pretokenized_inputs( self , *args , **kwargs) -> str:
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding( self , max_length=15) -> List[str]:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                # Simple input
                s = 'This is a simple input'
                sa = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                pa = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='max_length')
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='max_length')
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , sa , max_length=max_length , padding='max_length' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='max_length')
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='max_length')
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , pa , max_length=max_length , padding='max_length' , )
    def test_padding_if_pad_token_set_slow( self) -> Union[str, Any]:
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>')
        # Simple input
        s = 'This is a simple input'
        sa = ['This is a simple input looooooooong', 'This is a simple input']
        p = ('This is a simple input', 'This is a pair')
        pa = [
            ('This is a simple input loooooong', 'This is a simple input'),
            ('This is a simple pair loooooong', 'This is a simple pair'),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s , padding='max_length' , max_length=30 , return_tensors='np')
        out_sa = tokenizer(sa , padding=True , truncate=True , return_tensors='np')
        out_p = tokenizer(*p , padding='max_length' , max_length=60 , return_tensors='np')
        out_pa = tokenizer(pa , padding=True , truncate=True , return_tensors='np')
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30)
self.assertTrue(pad_token_id in out_s['input_ids'])
self.assertTrue(0 in out_s['attention_mask'])
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33)
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0])
self.assertFalse(0 in out_sa['attention_mask'][0])
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1])
self.assertTrue(0 in out_sa['attention_mask'][1])
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60)
self.assertTrue(pad_token_id in out_p['input_ids'])
self.assertTrue(0 in out_p['attention_mask'])
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52)
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0])
self.assertFalse(0 in out_pa['attention_mask'][0])
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1])
self.assertTrue(0 in out_pa['attention_mask'][1])
    def test_add_bos_token_slow( self) -> Any:
        bos_token = '$$$'
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=bos_token , add_bos_token=True)
        s = 'This is a simple input'
        sa = ['This is a simple input 1', 'This is a simple input 2']
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_sa = tokenizer(sa)
        self.assertEqual(out_s.input_ids[0] , bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_sa = tokenizer.batch_decode(out_sa.input_ids)
        self.assertEqual(decode_s.split()[0] , bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
@slow
    def test_truncation( self) -> int:
        tokenizer = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono')
        text = '\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#'
        expected_trucated_text = '\nif len_a > len_b: result = a\nelse: result = b'
        input_ids = tokenizer.encode(text)
        truncation_pattern = ['^#', re.escape('<|endoftext|>'), '^\'\'\'', '^"""', '\n\n\n']
        decoded_text = tokenizer.decode(input_ids , truncate_before_pattern=truncation_pattern)
        self.assertEqual(decoded_text , expected_trucated_text)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
pass
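# Illustrative sketch of the truncate-before-pattern behaviour tested above: cut the
# decoded text at the earliest MULTILINE match of any pattern. This mimics what the
# test relies on; it is not the library implementation.
if __name__ == "__main__":
    def truncate_before(text , patterns ):
        positions = [m.start() for p in patterns for m in re.finditer(p , text , re.MULTILINE )]
        return text[: min(positions )] if positions else text

    print(truncate_before('result = b\n\n\n\n# tail' , ['^#', '\n\n\n'] ))  # 'result = b'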
| 73 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
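# A minimal sketch of the lazy-import pattern used above: attribute access triggers
# the real submodule import. This is a simplified illustrative stand-in for
# _LazyModule, not the transformers implementation.
import importlib
import types

class _SketchLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # flatten {"submodule": ["Attr", ...]} into {"Attr": "submodule"}
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module('.' + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)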
| 42 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class Data2VecVisionConfig( PretrainedConfig ):
"""simple docstring"""
    model_type = '''data2vec-vision'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig( OnnxConfig ):
"""simple docstring"""
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
    def inputs( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
    def atol_for_validation( self ):
"""simple docstring"""
return 1e-4
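# Hedged note on the ONNX config above: pixel_values is exported with fully dynamic
# axes, and exported outputs are validated to within atol 1e-4. A sketch of how such
# a mapping is typically fed to an exporter (names below are assumptions, not the
# transformers export code):
#
#   dynamic_axes = {"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}}
#   torch.onnx.export(model, dummy_input, "model.onnx",
#                     input_names=["pixel_values"], dynamic_axes=dynamic_axes)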
| 74 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class XLMProphetNetConfig( PretrainedConfig ):
'''simple docstring'''
    model_type = 'xlm-prophetnet'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'num_encoder_attention_heads',
    }
    def __init__( self , activation_dropout = 0.1 , activation_function = "gelu" , vocab_size = 30522 , hidden_size = 1024 , encoder_ffn_dim = 4096 , num_encoder_layers = 12 , num_encoder_attention_heads = 16 , decoder_ffn_dim = 4096 , num_decoder_layers = 12 , num_decoder_attention_heads = 16 , attention_dropout = 0.1 , dropout = 0.1 , max_position_embeddings = 512 , init_std = 0.02 , is_encoder_decoder = True , add_cross_attention = True , decoder_start_token_id = 0 , ngram = 2 , num_buckets = 32 , relative_max_distance = 128 , disable_ngram_loss = False , eps = 0.0 , use_cache = True , pad_token_id = 0 , bos_token_id = 1 , eos_token_id = 2 , **kwargs , ) -> Tuple:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , add_cross_attention=add_cross_attention , decoder_start_token_id=decoder_start_token_id , **kwargs , )
@property
    def num_hidden_layers( self ) -> int:
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
    def num_hidden_layers( self , value ) -> Tuple:
'''simple docstring'''
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
' `num_decoder_layers`.' )
| 42 | 0 |
'''simple docstring'''
from itertools import product
def total_frequency_distribution( sides_number , dice_number ) -> list[int]:
    max_face_number = sides_number
    max_total_value = max_face_number * dice_number
    totals_frequencies = [0] * (max_total_value + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(faces_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution( ) -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
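    # Monte Carlo sanity check (illustrative, not part of the original solution):
    # the estimate should land near the exact answer of roughly 0.573.
    import random

    wins = sum(
        sum(random.randint(1, 4) for _ in range(9)) > sum(random.randint(1, 6) for _ in range(6))
        for _ in range(100_000)
    )
    print(wins / 100_000)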
| 75 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance ( x ,y ,max_step ) -> float:
    a = x
    b = y
    for step in range(max_step ): # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb ( distance ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_55, 2_55, 2_55)
def get_color_coded_rgb ( distance ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(distance ,1 ,1 ) )
def get_image ( image_width = 8_00 ,image_height = 6_00 ,figure_center_x = -0.6 ,figure_center_y = 0 ,figure_width = 3.2 ,max_step = 50 ,use_distance_color_coding = True ,) -> Image.Image:
    img = Image.new('RGB' ,(image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x ,figure_y ,max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
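    # Quick checks against the escape-time function above (illustrative additions):
    # the origin never escapes, while a point far outside the set escapes at once.
    print(get_distance(0, 0, 50))  # 1.0
    print(get_distance(2, 2, 50))  # 0.0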
| 42 | 0 |
"""simple docstring"""
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]
def create_vector( end_pointa , end_pointb ):
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def get_ad_vectors_cross( ab , ac ):
    x = ab[1] * ac[2] - ab[2] * ac[1] # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
    z = ab[0] * ac[1] - ab[1] * ac[0] # *k
    return (x, y, z)
def is_zero_vector( vector , accuracy ):
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def are_collinear( a , b , c , accuracy = 10 ):
    ab = create_vector(a , b )
    ac = create_vector(a , c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
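# Worked example for the helpers above (illustrative): three points on the x-axis
# are collinear, so the AB x AC cross product rounds to the zero vector.
if __name__ == "__main__":
    print(are_collinear((0, 0, 0), (1, 0, 0), (2, 0, 0)))  # True
    print(are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0)))  # False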
| 76 |
'''simple docstring'''
from math import isclose, sqrt
def next_point ( point_x ,point_y ,incoming_gradient ) -> tuple[float, float, float]:
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaeneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 1_00
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus ,point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution ( first_x_coord = 1.4 ,first_y_coord = -9.6 ) -> int:
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x ,point_y ,gradient = next_point(point_x ,point_y ,gradient )
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
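    # Sanity check (illustrative, not part of the original solution): every bounce
    # point produced by next_point must stay on the ellipse 4x^2 + y^2 = 100.
    x, y, m = 1.4, -9.6, (10.1 - -9.6) / (0.0 - 1.4)
    for _ in range(3):
        x, y, m = next_point(x, y, m)
        assert abs(4 * x * x + y * y - 1_00) < 1e-6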
| 42 | 0 |
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class CustomPipeline( DiffusionPipeline ):
    def __init__( self , unet , scheduler):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler)
    def __call__( self ):
        """simple docstring"""
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        model_output = self.unet(sample , timestep).sample
        scheduler_output = self.scheduler.step(model_output , timestep , sample).prev_sample
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
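# Hedged usage sketch for the toy pipeline above; the tiny randomly initialised UNet
# and the stock DDPM scheduler are assumptions for the demo, not part of the module.
if __name__ == "__main__":
    from diffusers import DDPMScheduler, UNet2DModel

    pipe = CustomPipeline(UNet2DModel(sample_size=8), DDPMScheduler())
    print(pipe().shape)  # an all-ones tensor shaped like the latent sample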
| 77 |
'''simple docstring'''
def miller_rabin( n ,allow_probable = False ) -> bool:
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
        return False
    if n > 3_31_70_44_06_46_79_88_73_85_96_19_81 and not allow_probable:
        raise ValueError(
            'Warning: upper bound of deterministic test is exceeded. '
            'Pass allow_probable=True to allow probabilistic test. '
            'A return value of True indicates a probable prime.' )
    # array bounds provided by analysis
    bounds = [
        20_47,
        1_37_36_53,
        25_32_60_01,
        32_15_03_17_51,
        2_15_23_02_89_87_47,
        3_47_47_49_66_03_83,
        3_41_55_00_71_72_83_21,
        1,
        3_82_51_23_05_65_46_41_30_51,
        1,
        1,
        31_86_65_85_78_34_03_11_51_16_74_61,
        3_31_70_44_06_46_79_88_73_85_96_19_81,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds ,1 ):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d ,s = n - 1, 0
    # break up n -1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime ,d * 2**r ,n )
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
def test_miller_rabin( ) -> None:
assert not miller_rabin(5_61 )
assert miller_rabin(5_63 )
# 2047
assert not miller_rabin(83_82_01 )
assert miller_rabin(83_82_07 )
# 1_373_653
assert not miller_rabin(17_31_60_01 )
assert miller_rabin(17_31_60_17 )
# 25_326_001
assert not miller_rabin(30_78_38_66_41 )
assert miller_rabin(30_78_38_66_53 )
# 3_215_031_751
assert not miller_rabin(1_71_30_45_57_48_01 )
assert miller_rabin(1_71_30_45_57_48_19 )
# 2_152_302_898_747
assert not miller_rabin(2_77_97_99_72_83_07 )
assert miller_rabin(2_77_97_99_72_83_27 )
# 3_474_749_660_383
assert not miller_rabin(1_13_85_00_23_90_94_41 )
assert miller_rabin(1_13_85_00_23_90_95_27 )
# 341_550_071_728_321
assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 )
assert miller_rabin(1_27_50_41_01_88_48_80_43_91 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 )
assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 )
assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
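    # Quick spot checks (illustrative): the deterministic witness sets make the
    # result exact below the documented bound.
    assert miller_rabin(97)
    assert not miller_rabin(5_61)  # Carmichael number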
| 42 | 0 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE_: Optional[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Dict ={
't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json',
}
class T5Config( PretrainedConfig ):
    model_type = """t5"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
    def __init__(self , vocab_size=32128 , d_model=512 , d_kv=64 , d_ff=2048 , num_layers=6 , num_decoder_layers=None , num_heads=8 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        ) # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("-" )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'" )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
class T5OnnxConfig( OnnxSeq2SeqConfigWithPast ):
@property
    def inputs(self ):
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs" )
        return common_inputs
@property
    def default_onnx_opset(self ):
        return 13
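# Worked example of the activation-string convention handled above: "gated-gelu"
# splits into a gated flag plus the dense activation name, which the config remaps
# to "gelu_new" for backwards compatibility (illustrative check).
if __name__ == "__main__":
    act_info = 'gated-gelu'.split('-')
    assert act_info[0] == 'gated' and act_info[-1] == 'gelu'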
| 78 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A_ = 16
A_ = 32
def get_dataloaders ( accelerator ,batch_size = 16 ,model_name_or_path = "bert-base-cased" ) -> List[Any]:
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path )
    datasets = load_dataset('glue' ,'mrpc' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] ,examples['sentence2'] ,truncation=True ,max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function ,batched=True ,remove_columns=['idx', 'sentence1', 'sentence2'] ,load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' ,'labels' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples ,padding='max_length' ,max_length=1_28 ,return_tensors='pt' )
        return tokenizer.pad(examples ,padding='longest' ,return_tensors='pt' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] ,shuffle=True ,collate_fn=collate_fn ,batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] ,shuffle=False ,collate_fn=collate_fn ,batch_size=batch_size )
    return train_dataloader, eval_dataloader
def evaluation_loop ( accelerator ,model ,eval_dataloader ,metric ) -> Union[str, Any]:
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions ,references = accelerator.gather(
            (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions ,references=references ,)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function ( config ,args ) -> List[str]:
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    model_name_or_path = args.model_name_or_path
    set_seed(seed )
    train_dataloader ,eval_dataloader = get_dataloaders(accelerator ,batch_size ,model_name_or_path )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path ,return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() ,lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer ,num_warmup_steps=0 ,num_training_steps=max_training_steps ,)
    else:
        lr_scheduler = DummyScheduler(optimizer ,total_num_steps=max_training_steps ,warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model ,optimizer ,train_dataloader ,eval_dataloader ,lr_scheduler = accelerator.prepare(
        model ,optimizer ,train_dataloader ,eval_dataloader ,lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load('glue' ,'mrpc' )
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        epoch_string = args.resume_from_checkpoint.split('epoch_' )[1]
        state_epoch_num = ''
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num ) + 1
        accuracy = evaluation_loop(accelerator ,model ,eval_dataloader ,metric )
        accelerator.print('resumed checkpoint performance:' ,accuracy )
        accelerator.print('resumed checkpoint\'s scheduler\'s lr:' ,lr_scheduler.get_lr()[0] )
        accelerator.print('resumed optimizers\'s lr:' ,optimizer.param_groups[0]['lr'] )
        with open(os.path.join(args.output_dir ,f'''state_{starting_epoch-1}.json''' ) ,'r' ) as f:
            resumed_state = json.load(f )
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch ,ending_epoch ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f'''epoch_{epoch}'''
        output_dir = os.path.join(args.output_dir ,output_dir )
        accelerator.save_state(output_dir )
        accuracy = evaluation_loop(accelerator ,model ,eval_dataloader ,metric )
        state['accuracy'] = accuracy
        state['lr'] = lr_scheduler.get_lr()[0]
        state['optimizer_lr'] = optimizer.param_groups[0]['lr']
        state['epoch'] = epoch
        state['overall_step'] = overall_step
        accelerator.print(f'''epoch {epoch}:''' ,state )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir ,f'''state_{epoch}.json''' ) ,'w' ) as f:
                json.dump(state ,f )
def main( ) -> str:
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path' ,type=str ,default='bert-base-cased' ,help='Path to pretrained model or model identifier from huggingface.co/models.' ,required=False ,)
    parser.add_argument(
        '--output_dir' ,type=str ,default='.' ,help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' ,)
    parser.add_argument(
        '--resume_from_checkpoint' ,type=str ,default=None ,help='If the training should continue from a checkpoint folder.' ,)
    parser.add_argument(
        '--partial_train_epoch' ,type=int ,default=None ,help='If passed, the training will stop after this number of epochs.' ,)
    parser.add_argument(
        '--num_epochs' ,type=int ,default=2 ,help='Number of train epochs.' ,)
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config ,args )
if __name__ == "__main__":
main()
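# Hedged invocation sketch for the script above; the script file name and the
# DeepSpeed config path below are assumptions, not part of the original:
#
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 2 --output_dir ./ckpts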
| 42 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyรจ""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmรฅl""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class UpperCAmelCase_ ( __lowerCamelCase ):
    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang)
    def forward(self, inputs):
        return self.model.generate(**inputs)
    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
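# A minimal usage sketch of the same checkpoint without the tool wrapper
# (assumptions: network access to download facebook/nllb-200-distilled-600M;
# NLLB selects the output language by forcing its language-code token as the
# first generated token).
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
    tok = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
    nllb = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
    batch = tok("The cat sleeps.", return_tensors="pt")
    out = nllb.generate(**batch, forced_bos_token_id=tok.convert_tokens_to_ids("ron_Latn"))
    print(tok.decode(out[0], skip_special_tokens=True))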
| 79 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize, sigma, theta, lambd, psi, gamma) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)
    # fill in each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # convert the image to grayscale
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 11x11 mask and 6 directions", out)
    waitKey(0)
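    # Sanity check (sketch): an even ksize is bumped to the next odd value, and
    # with psi = 0 the centre response is exp(0) * cos(0) = 1.
    kernel = gabor_filter_kernel(10, 8, 45, 10, 0, 0)
    assert kernel.shape == (11, 11)
    assert np.isclose(kernel[5, 5], 1.0)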
| 42 | 0 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length
    def __len__(self):
        return self.length
    def __getitem__(self, i):
        return i
class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)
    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f'--output_dir {output_dir}'.split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f'--output_dir {output_dir}'.split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
F'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)
    def compute_metrics(p: EvalPrediction) -> dict:
        '''simple docstring'''
        sequential = list(range(len(dataset)))
        success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
        if not success and training_args.local_rank == 0:
            logger.warning(
                """Predictions and/or labels do not match expected results:\n  - predictions: """
                f'{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}' )
        return {"success": success}
    trainer = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
    metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
    p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
    trainer.args.eval_accumulation_steps = 2
    metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
    p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
    trainer.args.eval_accumulation_steps = None
| 80 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    '''simple docstring'''
    _backends = ['transformers', 'torch', 'note_seq']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['transformers', 'torch', 'note_seq'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['transformers', 'torch', 'note_seq'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['transformers', 'torch', 'note_seq'])
| 42 | 0 |
def solution(n: int = 1000) -> int:
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
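# An equivalent, more direct formulation (sketch): grow the sequence until the
# digit count of the current term reaches n. `fib_index_with_n_digits` is a
# hypothetical helper name, not part of the solution above.
def fib_index_with_n_digits(n: int = 1000) -> int:
    a, b, index = 1, 1, 2
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index
assert fib_index_with_n_digits(3) == 12  # F(12) = 144 is the first 3-digit term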
| 81 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return model
@property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , )
return model
@property
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , )
lowerCamelCase_ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return vqvae, unet
@slow
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowerCamelCase_ = DDPMScheduler()
lowerCamelCase_ = AudioDiffusionPipeline(vqvae=SCREAMING_SNAKE_CASE_ , unet=self.dummy_unet , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , steps=4 )
lowerCamelCase_ = output.audios[0]
lowerCamelCase_ = output.images[0]
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , steps=4 , return_dict=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowerCamelCase_ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowerCamelCase_ = DDIMScheduler()
lowerCamelCase_ = self.dummy_vqvae_and_unet
lowerCamelCase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
np.random.seed(0 )
lowerCamelCase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(raw_audio=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , start_step=5 , steps=10 )
lowerCamelCase_ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowerCamelCase_ = self.dummy_unet_condition
lowerCamelCase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=SCREAMING_SNAKE_CASE_ , mel=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
np.random.seed(0 )
lowerCamelCase_ = torch.rand((1, 1, 10) )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ , encoding=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output.images[0]
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = torch_device
lowerCamelCase_ = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' )
lowerCamelCase_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
lowerCamelCase_ = pipe(generator=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = output.audios[0]
lowerCamelCase_ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowerCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
lowerCamelCase_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 42 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : List[str] ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase__ ( self : int ) -> str:
'''simple docstring'''
UpperCAmelCase_ = 1
UpperCAmelCase_ = 3
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_UpperCAmelCase )
return image
@property
def lowercase__ ( self : int ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def lowercase__ ( self : Dict ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(_UpperCAmelCase )
@property
def lowercase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])
                def to(self, device):
                    self.pixel_values.to(device)
                    return self
            return Out()
return extract
def lowercase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.dummy_cond_unet
UpperCAmelCase_ = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
UpperCAmelCase_ = 77
UpperCAmelCase_ = self.dummy_image.to(_UpperCAmelCase )
UpperCAmelCase_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = AltDiffusionImgaImgPipeline(
unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=self.dummy_extractor , )
UpperCAmelCase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_UpperCAmelCase )
UpperCAmelCase_ = alt_pipe.to(_UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
UpperCAmelCase_ = alt_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=_UpperCAmelCase , )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
UpperCAmelCase_ = alt_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=_UpperCAmelCase , return_dict=_UpperCAmelCase , )[0]
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.dummy_cond_unet
UpperCAmelCase_ = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
UpperCAmelCase_ = 77
UpperCAmelCase_ = self.dummy_image.to(_UpperCAmelCase )
# put models in fp16
UpperCAmelCase_ = unet.half()
UpperCAmelCase_ = vae.half()
UpperCAmelCase_ = bert.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = AltDiffusionImgaImgPipeline(
unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=self.dummy_extractor , )
UpperCAmelCase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_UpperCAmelCase )
UpperCAmelCase_ = alt_pipe.to(_UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = alt_pipe(
[prompt] , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="np" , image=_UpperCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowercase__ ( self : Tuple ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCAmelCase_ = init_image.resize((760, 504) )
UpperCAmelCase_ = "BAAI/AltDiffusion"
UpperCAmelCase_ = AltDiffusionImgaImgPipeline.from_pretrained(
_UpperCAmelCase , safety_checker=_UpperCAmelCase , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase_ = "A fantasy landscape, trending on artstation"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_UpperCAmelCase , output_type="np" , )
UpperCAmelCase_ = output.images[0]
UpperCAmelCase_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
UpperCAmelCase_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
UpperCAmelCase_ = init_image.resize((768, 512) )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
UpperCAmelCase_ = "BAAI/AltDiffusion"
UpperCAmelCase_ = AltDiffusionImgaImgPipeline.from_pretrained(
_UpperCAmelCase , safety_checker=_UpperCAmelCase , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase_ = "A fantasy landscape, trending on artstation"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_UpperCAmelCase , output_type="np" , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 82 |
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))
def random_number(chars_incl, i):
    pass  # Put your code here...
def random_letters(chars_incl, i):
    pass  # Put your code here...
def random_characters(chars_incl, i):
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters
def main():
    max_length = int(input('Please indicate the max length of your password: ').strip())
    chars_incl = input(
        'Please indicate the characters that must be in your password: ').strip()
    print('Password generated:', password_generator(max_length))
    print(
        'Alternative Password generated:', alternative_password_generator(chars_incl, max_length))
    print('[If you are thinking of using this password, You better save it.]')
if __name__ == "__main__":
main()
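# Quick illustration (sketch) of the strength check above:
assert is_strong_password('Hunter2!pass')
assert not is_strong_password('password')  # no uppercase, digit, or symbol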
| 42 | 0 |
"""simple docstring"""
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)
    def BERT(self, **inputs):
        # Encode a batch of tokenized sentences with the shared encoder.
        return self.bert(**inputs).last_hidden_state
    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)
    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))
    def forward(self, W_query, W_supports):
        # Score each query token as a potential entity start/end, using the
        # support tokens at the entity boundary markers as references.
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
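# Shape-level toy of the scoring in forward() above (hypothetical tensors):
# each query token is scored against the support embeddings at the entity
# start markers, summed over supports, then softmaxed over query positions.
if __name__ == "__main__":
    q_i = torch.randn(6, 8)      # one query: 6 tokens, hidden size 8
    s_start = torch.randn(3, 8)  # embeddings at start markers from 3 supports
    p_start = torch.matmul(q_i, s_start.T).sum(1).softmax(0)
    assert p_start.shape == (6,) and torch.isclose(p_start.sum(), torch.tensor(1.0))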
| 83 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'external_deps', 'test_metrics.py'])
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)
    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)
    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()
    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f'''Found {torch.cuda.device_count()} devices.''')
        cmd = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 42 | 0 |
import math
def decimal_to_octal(num: int) -> str:
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f'0o{int(octal)}'
def main():
print('\n2 in octal is:' )
print(decimal_to_octal(2 ) ) # = 2
print('\n8 in octal is:' )
print(decimal_to_octal(8 ) ) # = 10
print('\n65 in octal is:' )
print(decimal_to_octal(65 ) ) # = 101
print('\n216 in octal is:' )
print(decimal_to_octal(216 ) ) # = 330
print('\n512 in octal is:' )
print(decimal_to_octal(512 ) ) # = 1000
print('\n' )
if __name__ == "__main__":
main()
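# Cross-check against the builtin (sketch): `oct` and the manual conversion
# above agree for small non-negative integers.
for n in (2, 8, 65, 216, 512):
    assert decimal_to_octal(n) == oct(n)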
| 84 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor) -> None:
    if hor == 1_28:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 1_28, 2_56)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D')
    elif hor == 32:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 64, 1_28, 2_56)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    model = torch.load(f'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch')
    state_dict = model.state_dict()
    config = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 6_55_36,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
    hf_value_function = UNet1DModel(**config)
    print(f'length of state dict: {len(state_dict.keys())}')
    print(f'length of value function dict: {len(hf_value_function.state_dict().keys())}')
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin')
    with open(f'hub/hopper-medium-v2/unet/hor{hor}/config.json', 'w') as f:
        json.dump(config, f)
def value_function() -> None:
    config = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 1_28, 2_56),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 6_55_36,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
    model = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch')
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f'length of state dict: {len(state_dict.keys())}')
    print(f'length of value function dict: {len(hf_value_function.state_dict().keys())}')
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin')
    with open('hub/hopper-medium-v2/value_function/config.json', 'w') as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
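# The remapping above relies on both state dicts yielding their keys in the
# same (insertion) order; a tiny standalone sketch of the idea:
_src = {'a.w': 1, 'a.b': 2}
_dst_keys = ['layer.weight', 'layer.bias']
assert {new: _src[old] for old, new in zip(_src, _dst_keys)} == {'layer.weight': 1, 'layer.bias': 2}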
| 42 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
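# A stripped-down sketch of the lazy-module idea used above (illustrative, not
# the actual _LazyModule implementation): attribute access triggers the real
# import, so importing the package itself stays cheap.
import importlib
class _LazySketch:
    def __init__(self, module_name):
        self._name = module_name
    def __getattr__(self, item):
        return getattr(importlib.import_module(self._name), item)
assert _LazySketch('math').sqrt(9) == 3.0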
| 85 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A_ = 250_004
A_ = 250_020
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = MBartTokenizer
SCREAMING_SNAKE_CASE_ = MBartTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ = MBartTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = MBartTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.tokenize('This is a test' )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        lowerCamelCase_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
            'é',
'.',
] , )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase_ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
lowerCamelCase_ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=True
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=False
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'facebook/mbart-large-en-ro'
SCREAMING_SNAKE_CASE_ = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE_ = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
SCREAMING_SNAKE_CASE_ = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]
@classmethod
def UpperCamelCase( cls ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
lowerCamelCase_ = 1
return cls
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_XX'] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250020 )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
self.assertIn(SCREAMING_SNAKE_CASE_ , self.tokenizer.all_special_ids )
lowerCamelCase_ = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
lowerCamelCase_ = self.tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 10
lowerCamelCase_ = self.tokenizer(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250026, 250001] )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = MBartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE_ )
@require_torch
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
lowerCamelCase_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
lowerCamelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=3 , return_tensors='pt' )
lowerCamelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=10 , return_tensors='pt' )
lowerCamelCase_ = targets['input_ids']
lowerCamelCase_ = shift_tokens_right(SCREAMING_SNAKE_CASE_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
# A, test, EOS, en_XX
'input_ids': [[62, 3034, 2, 250004]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250001,
} , )
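# What shift_tokens_right does for MBart (sketch): the language code, stored as
# the last non-pad label token, is rotated to the front so that it becomes the
# first decoder input token.
if __name__ == "__main__" and is_torch_available():
    import torch
    labels = torch.tensor([[9019, 96, 2, RO_CODE]])  # some tokens, </s>, ro_RO
    print(shift_tokens_right(labels, 1))  # -> tensor([[250020, 9019, 96, 2]])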
| 42 | 0 |
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
"""simple docstring"""
if num < 2:
raise ValueError("The input value cannot be less than 2" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'num',
type=int,
help='The value to find a divisor of',
)
parser.add_argument(
'--attempts',
type=int,
default=3,
help='The number of attempts before giving up',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"{args.num} is probably prime")
else:
        quotient = args.num // divisor
print(F"{args.num} = {divisor} * {quotient}")
| 86 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = 'ylacombe/bark-small'
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = 'en_speaker_1'
lowerCamelCase_ = 'This is a test string'
lowerCamelCase_ = 'speaker_embeddings_path.json'
lowerCamelCase_ = 'speaker_embeddings'
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowerCamelCase_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCamelCase_ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowerCamelCase_ = 35
lowerCamelCase_ = 2
lowerCamelCase_ = 8
lowerCamelCase_ = {
'semantic_prompt': np.ones(SCREAMING_SNAKE_CASE_ ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowerCamelCase_ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowerCamelCase_ = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowerCamelCase_ = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = processor(text=self.input_string )
lowerCamelCase_ = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
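# --- Added example ---
# Shape cheat-sheet for the voice presets exercised above (sizes mirror the
# test's seq_len=35, 2 coarse and 8 fine codebooks; the ones-arrays are of
# course illustrative): the semantic prompt is 1-D, while the coarse and
# fine prompts are (num_codebooks, seq_len) arrays stored in a single .npz.
import os
import tempfile
import numpy as np

preset = {
    'semantic_prompt': np.ones(35),
    'coarse_prompt': np.ones((2, 35)),
    'fine_prompt': np.ones((8, 35)),
}
path = os.path.join(tempfile.mkdtemp(), 'voice_preset.npz')
np.savez(path, **preset)
loaded = np.load(path)
assert loaded['fine_prompt'].shape == (8, 35)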
| 42 | 0 |
from scipy.stats import spearmanr
import datasets
_lowerCamelCase : List[Any] = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_lowerCamelCase : Any = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_lowerCamelCase : List[Any] = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str]=False) ->int:
'''simple docstring'''
A__ = spearmanr(UpperCAmelCase__ , UpperCAmelCase__)
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 87 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
A_ = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
A_ = {
"allenai/longformer-base-4096": 4_096,
"allenai/longformer-large-4096": 4_096,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_096,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_096,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _UpperCamelCase ( ) -> Any:
lowerCamelCase_ = (
list(range(ord('!' ) ,ord('~' ) + 1 ) ) + list(range(ord('¡' ) ,ord('¬' ) + 1 ) ) + list(range(ord('®' ) ,ord('ÿ' ) + 1 ) )
)
lowerCamelCase_ = bs[:]
lowerCamelCase_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCamelCase )
cs.append(2**8 + n )
n += 1
lowerCamelCase_ = [chr(__UpperCamelCase ) for n in cs]
return dict(zip(__UpperCamelCase ,__UpperCamelCase ) )
def _UpperCamelCase ( __UpperCamelCase ) -> List[str]:
lowerCamelCase_ = set()
lowerCamelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase_ = char
return pairs
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask']
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="replace" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as vocab_handle:
lowerCamelCase_ = json.load(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ = errors # how to handle errors in decoding
lowerCamelCase_ = bytes_to_unicode()
lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as merges_handle:
lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1]
lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowerCamelCase_ = {}
lowerCamelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
lowerCamelCase_ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ ,lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
lowerCamelCase_ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = ' '.join(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = word
return word
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
lowerCamelCase_ = []
for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(' ' ) )
return bpe_tokens
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = ''.join(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '\n' )
lowerCamelCase_ = 0
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
lowerCamelCase_ = token_index
writer.write(' '.join(SCREAMING_SNAKE_CASE_ ) + '\n' )
index += 1
return vocab_file, merge_file
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
lowerCamelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()):
lowerCamelCase_ = ' ' + text
return (text, kwargs)
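# --- Added example ---
# A compact sketch of the greedy BPE merge loop the tokenizer above
# implements: repeatedly merge the adjacent pair with the best (lowest)
# rank until no ranked pair remains. The tiny merge table below is made up
# purely for illustration.
def bpe_sketch(token, ranks):
    word = list(token)
    while len(word) > 1:
        pairs = [(word[i], word[i + 1]) for i in range(len(word) - 1)]
        best = min(pairs, key=lambda pair: ranks.get(pair, float('inf')))
        if best not in ranks:
            break  # nothing mergeable is left
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == best:
                merged.append(word[i] + word[i + 1])
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = merged
    return word


# 'l'+'o' merges first (rank 0), then 'lo'+'w' (rank 1).
assert bpe_sketch('lower', {('l', 'o'): 0, ('lo', 'w'): 1}) == ['low', 'e', 'r']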
| 42 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
UpperCAmelCase = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
UpperCAmelCase = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class lowercase__ ( A_ ):
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase = RoFormerTokenizer
def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="[UNK]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="[PAD]" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
super().__init__(
SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , tokenize_chinese_chars=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
pre_tok_state.get("""lowercase""" , SCREAMING_SNAKE_CASE) != do_lower_case
or pre_tok_state.get("""strip_accents""" , SCREAMING_SNAKE_CASE) != strip_accents
):
_lowerCamelCase : str = getattr(SCREAMING_SNAKE_CASE , pre_tok_state.pop("""type"""))
_lowerCamelCase : List[str] = do_lower_case
_lowerCamelCase : str = strip_accents
_lowerCamelCase : Optional[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE)
_lowerCamelCase : Any = do_lower_case
def __getstate__( self) -> str:
_lowerCamelCase : Any = self.__dict__.copy()
_lowerCamelCase : Dict = BertPreTokenizer()
return state
def __setstate__( self , SCREAMING_SNAKE_CASE) -> Union[str, Any]:
_lowerCamelCase : Tuple = d
_lowerCamelCase : List[Any] = self.__dict__["""_tokenizer"""].get_vocab()
_lowerCamelCase : Optional[int] = PreTokenizer.custom(JiebaPreTokenizer(SCREAMING_SNAKE_CASE))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None) -> Any:
_lowerCamelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> List[int]:
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> Tuple[str]:
_lowerCamelCase : Dict = self._tokenizer.model.save(SCREAMING_SNAKE_CASE , name=SCREAMING_SNAKE_CASE)
return tuple(SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=False , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
_lowerCamelCase : List[Any] = BertPreTokenizer()
return super().save_pretrained(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
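# --- Added example ---
# Why the __getstate__/__setstate__ pair above exists: PreTokenizer.custom
# wraps a Python object (the Jieba pre-tokenizer) that the Rust backend
# cannot serialize, so a plain BertPreTokenizer is swapped in before
# pickling/saving and the custom one is rebuilt afterwards. The same
# pattern in miniature, with an unpicklable lambda standing in:
import pickle


class UnpicklableMemberDemo:
    def __init__(self):
        self.splitter = lambda text: text.split()  # lambdas cannot be pickled

    def __getstate__(self):
        state = self.__dict__.copy()
        state['splitter'] = None  # drop the offending member
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        self.splitter = lambda text: text.split()  # rebuild it on load


demo = pickle.loads(pickle.dumps(UnpicklableMemberDemo()))
assert demo.splitter('a b') == ['a', 'b']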
| 88 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
A_ = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
A_ = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
A_ = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = RealmTokenizer
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="[UNK]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[PAD]" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> List[Any]:
'''simple docstring'''
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE_ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE_ ) != tokenize_chinese_chars
):
lowerCamelCase_ = getattr(SCREAMING_SNAKE_CASE_ , normalizer_state.pop('type' ) )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = strip_accents
lowerCamelCase_ = tokenize_chinese_chars
lowerCamelCase_ = normalizer_class(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = do_lower_case
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = PaddingStrategy.MAX_LENGTH
lowerCamelCase_ = text
lowerCamelCase_ = kwargs.pop('text_pair' , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = kwargs.pop('return_tensors' , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(SCREAMING_SNAKE_CASE_ ):
if batch_text_pair is not None:
lowerCamelCase_ = batch_text_pair[idx]
else:
lowerCamelCase_ = None
lowerCamelCase_ = super().__call__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = encoded_candidates.get('input_ids' )
lowerCamelCase_ = encoded_candidates.get('attention_mask' )
lowerCamelCase_ = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(SCREAMING_SNAKE_CASE_ )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(SCREAMING_SNAKE_CASE_ )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {key: item for key, item in output_data.items() if len(SCREAMING_SNAKE_CASE_ ) != 0}
return BatchEncoding(SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
lowerCamelCase_ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
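# --- Added example ---
# What the token_type_ids helper above produces for a sentence pair: zeros
# over [CLS] A [SEP] and ones over B [SEP]. Toy ids for illustration only.
cls, sep = [101], [102]
token_ids_a, token_ids_b = [7, 8, 9], [4, 5]
segment_ids = len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
assert segment_ids == [0, 0, 0, 0, 0, 1, 1, 1]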
| 42 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class _lowerCamelCase( _a ):
lowercase_ : Dict = """Wav2Vec2FeatureExtractor"""
lowercase_ : Tuple = """AutoTokenizer"""
def __init__( self, lowerCamelCase, lowerCamelCase) -> Tuple:
"""simple docstring"""
super().__init__(lowerCamelCase, lowerCamelCase)
_lowercase : Any = self.feature_extractor
_lowercase : Dict = False
@classmethod
def UpperCamelCase ( cls, lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
try:
return super().from_pretrained(lowerCamelCase, **lowerCamelCase)
except OSError:
warnings.warn(
F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
' include a `tokenizer_class` attribute is deprecated and will be '
'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
' attribute to either your `config.json` or `tokenizer_config.json` '
'file to suppress this warning: ', lowerCamelCase, )
_lowercase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase, **lowerCamelCase)
_lowercase : int = WavaVecaCTCTokenizer.from_pretrained(lowerCamelCase, **lowerCamelCase)
return cls(feature_extractor=lowerCamelCase, tokenizer=lowerCamelCase)
def __call__( self, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase, **lowerCamelCase)
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
_lowercase : Any = kwargs.pop('raw_speech')
else:
_lowercase : Union[str, Any] = kwargs.pop('audio', lowerCamelCase)
_lowercase : int = kwargs.pop('sampling_rate', lowerCamelCase)
_lowercase : Optional[int] = kwargs.pop('text', lowerCamelCase)
if len(lowerCamelCase) > 0:
_lowercase : Optional[int] = args[0]
_lowercase : Union[str, Any] = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.')
if audio is not None:
_lowercase : Union[str, Any] = self.feature_extractor(lowerCamelCase, *lowerCamelCase, sampling_rate=lowerCamelCase, **lowerCamelCase)
if text is not None:
_lowercase : List[str] = self.tokenizer(lowerCamelCase, **lowerCamelCase)
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowercase : Any = encodings['input_ids']
return inputs
def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*lowerCamelCase, **lowerCamelCase)
_lowercase : Optional[int] = kwargs.pop('input_features', lowerCamelCase)
_lowercase : Dict = kwargs.pop('labels', lowerCamelCase)
if len(lowerCamelCase) > 0:
_lowercase : List[str] = args[0]
_lowercase : List[Any] = args[1:]
if input_features is not None:
_lowercase : Optional[int] = self.feature_extractor.pad(lowerCamelCase, *lowerCamelCase, **lowerCamelCase)
if labels is not None:
_lowercase : Dict = self.tokenizer.pad(lowerCamelCase, **lowerCamelCase)
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_lowercase : Any = labels['input_ids']
return input_features
def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCamelCase, **lowerCamelCase)
def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*lowerCamelCase, **lowerCamelCase)
@contextmanager
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.')
_lowercase : Optional[int] = True
_lowercase : int = self.tokenizer
yield
_lowercase : Any = self.feature_extractor
_lowercase : List[Any] = False
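# --- Added example ---
# Hypothetical usage of the processor above (model name and array values
# are placeholders): audio is routed to the feature extractor, text to the
# tokenizer, and passing both attaches the tokenized ids as labels.
# Commented out because it would download model files when run:
#
# import numpy as np
# from transformers import Wav2Vec2Processor
#
# processor = Wav2Vec2Processor.from_pretrained('facebook/wav2vec2-base-960h')
# speech = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
# inputs = processor(audio=speech, sampling_rate=16000, text='HELLO WORLD')
# inputs['input_values']  # from the audio branch (feature extractor)
# inputs['labels']        # from the text branch (tokenizer input_ids)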
| 89 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
lowerCamelCase_ = str(__UpperCamelCase )
return len(__UpperCamelCase ) == 9 and set(__UpperCamelCase ) == set('123456789' )
def _UpperCamelCase ( ) -> int | None:
for base_num in range(99_99 ,49_99 ,-1 ):
lowerCamelCase_ = 10_00_02 * base_num
if is_9_pandigital(__UpperCamelCase ):
return candidate
for base_num in range(3_33 ,99 ,-1 ):
lowerCamelCase_ = 1_00_20_03 * base_num
if is_9_pandigital(__UpperCamelCase ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''')
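# --- Added example ---
# Where the multipliers above come from: for a 4-digit n whose double has
# 5 digits, concat(n, 2n) = n * 10**5 + 2n = 100002 * n; for a 3-digit n
# whose double and triple are both 3 digits, concat(n, 2n, 3n)
# = n * 10**6 + 2n * 10**3 + 3n = 1002003 * n. Quick checks:
n = 9327
assert int(str(n) + str(2 * n)) == 100002 * n  # '9327' + '18654'
m = 192
assert int(str(m) + str(2 * m) + str(3 * m)) == 1002003 * m  # '192384576'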
| 42 | 0 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class a__ ( a__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase_=0.01 , lowerCamelCase_=10_00 ) -> Union[str, Any]:
lowerCAmelCase__ = p_stop
lowerCAmelCase__ = max_length
def __iter__( self ) -> Any:
lowerCAmelCase__ = 0
lowerCAmelCase__ = False
while not stop and count < self.max_length:
yield count
count += 1
lowerCAmelCase__ = random.random() < self.p_stop
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=True ) -> Optional[Any]:
lowerCAmelCase__ = [
BatchSamplerShard(lowerCamelCase_ , 2 , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
for i in range(2 )
]
lowerCAmelCase__ = [list(lowerCamelCase_ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(lowerCamelCase_ ) for shard in batch_sampler_shards] , [len(lowerCamelCase_ ) for e in expected] )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
# Check the shards when the dataset is a round multiple of total batch size.
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCamelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but its number of batches is a
# multiple of num_processes.
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size and its number of batches is not
# a multiple of num_processes.
lowerCAmelCase__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
# Check the shards when the dataset is very small.
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [[], []]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
# Check the shards when the dataset is a round multiple of batch size.
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCamelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size.
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
# Check the shards when the dataset is very small.
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [[], []]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
# Check the shards when the dataset is a round multiple of total batch size.
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCamelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but its number of batches is a
# multiple of num_processes.
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size and its number of batches is not
# a multiple of num_processes.
lowerCAmelCase__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is very small.
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [[], []]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
# Check the shards when the dataset is a round multiple of batch size.
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCamelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size.
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is very small.
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = [[], []]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
lowerCAmelCase__ = [BatchSamplerShard(lowerCamelCase_ , 2 , lowerCamelCase_ , even_batches=lowerCamelCase_ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=2 , lowerCamelCase_=False ) -> str:
random.seed(lowerCamelCase_ )
lowerCAmelCase__ = list(lowerCamelCase_ )
lowerCAmelCase__ = [
IterableDatasetShard(
lowerCamelCase_ , batch_size=lowerCamelCase_ , drop_last=lowerCamelCase_ , num_processes=lowerCamelCase_ , process_index=lowerCamelCase_ , split_batches=lowerCamelCase_ , )
for i in range(lowerCamelCase_ )
]
lowerCAmelCase__ = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(lowerCamelCase_ )
iterable_dataset_lists.append(list(lowerCamelCase_ ) )
lowerCAmelCase__ = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
lowerCAmelCase__ = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
self.assertTrue(len(lowerCamelCase_ ) % shard_batch_size == 0 )
lowerCAmelCase__ = []
for idx in range(0 , len(lowerCamelCase_ ) , lowerCamelCase_ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(lowerCamelCase_ ) < len(lowerCamelCase_ ):
reference += reference
self.assertListEqual(lowerCamelCase_ , reference[: len(lowerCamelCase_ )] )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = 42
lowerCAmelCase__ = RandomIterableDataset()
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
# Edge case with a very small dataset
lowerCAmelCase__ = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = BatchSampler(range(16 ) , batch_size=4 , drop_last=lowerCamelCase_ )
lowerCAmelCase__ = SkipBatchSampler(lowerCamelCase_ , 2 )
self.assertListEqual(list(lowerCamelCase_ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = DataLoader(list(range(16 ) ) , batch_size=4 )
lowerCAmelCase__ = skip_first_batches(lowerCamelCase_ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(lowerCamelCase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCamelCase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
Accelerator()
lowerCAmelCase__ = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(lowerCamelCase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCamelCase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
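# --- Added example ---
# The core behavior the tests above pin down, in miniature: batches are
# dealt round-robin to the shards, and (with even_batches) a shard that
# runs past the end wraps around to batches from the start so every
# process sees the same number of batches. This sketch deliberately
# ignores the item-level completion of a short final batch that the real
# BatchSamplerShard also performs.
def shard_round_robin(batches, num_shards, shard_idx):
    num_rows = -(-len(batches) // num_shards)  # ceil division
    taken = []
    for row in range(num_rows):
        slot = row * num_shards + shard_idx
        # Past the end, wrap around to the start (the even_batches behavior).
        taken.append(batches[slot % len(batches)])
    return taken


batches = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11], [12, 13, 14], [15, 16, 17], [18, 19, 20]]
assert shard_round_robin(batches, 2, 0) == [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
assert shard_round_robin(batches, 2, 1) == [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]]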
| 90 |
'''simple docstring'''
A_ = "Input must be a string of 8 numbers plus letter"
A_ = "TRWAGMYFPDXBNJZSQVHLCKE"
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowerCamelCase_ = f'''Expected string as input, found {type(__UpperCamelCase ).__name__}'''
raise TypeError(__UpperCamelCase )
lowerCamelCase_ = spanish_id.replace('-' ,'' ).upper()
if len(__UpperCamelCase ) != 9:
raise ValueError(__UpperCamelCase )
try:
lowerCamelCase_ = int(spanish_id_clean[0:8] )
lowerCamelCase_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__UpperCamelCase ) from ex
if letter.isdigit():
raise ValueError(__UpperCamelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
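# --- Added example ---
# Worked instance of the mod-23 check above, using the well-known sample
# DNI 12345678Z: 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == 'Z', so the
# letter matches and the id validates.
assert 12345678 % 23 == 14
assert 'TRWAGMYFPDXBNJZSQVHLCKE'[14] == 'Z'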
| 42 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_lowercase = logging.get_logger(__name__)
@dataclass
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: int = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self : Optional[int] ,**A_ : List[Any] ) -> str:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
A = deprecated_arg[3:]
A = not kwargs.pop(A_ )
logger.warning(
F'{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'
F' {positive_arg}={kwargs[positive_arg]}' )
A = kwargs.pop('tpu_name' ,self.tpu_name )
A = kwargs.pop('device_idx' ,self.device_idx )
A = kwargs.pop('eager_mode' ,self.eager_mode )
A = kwargs.pop('use_xla' ,self.use_xla )
super().__init__(**A_ )
_lowerCamelCase: str = field(
default=_lowercase , metadata={'''help''': '''Name of TPU'''} , )
_lowerCamelCase: int = field(
default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
_lowerCamelCase: bool = field(default=_lowercase , metadata={'''help''': '''Benchmark models in eager mode.'''} )
_lowerCamelCase: bool = field(
default=_lowercase , metadata={
'''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
} , )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self ,['tf'] )
A = None
if self.tpu:
try:
if self.tpu_name:
A = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
A = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
A = None
return tpu
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self ,['tf'] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
A = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,'GPU' )
A = tf.distribute.OneDeviceStrategy(device=F'/gpu:{self.device_idx}' )
else:
tf.config.set_visible_devices([] ,'GPU' ) # disable GPU
A = tf.distribute.OneDeviceStrategy(device=F'/cpu:{self.device_idx}' )
return strategy
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> bool:
requires_backends(self ,['tf'] )
return self._setup_tpu is not None
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> "tf.distribute.Strategy":
requires_backends(self ,['tf'] )
return self._setup_strategy
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
requires_backends(self ,['tf'] )
return tf.config.list_physical_devices('GPU' )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
requires_backends(self ,['tf'] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> bool:
return self.n_gpu > 0
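# --- Added example ---
# The deprecated-flag translation in __init__ above, in isolation: a legacy
# `no_x=True` kwarg becomes `x=False` by stripping the 'no_' prefix and
# negating the value. Toy dict instead of the real benchmark kwargs:
kwargs = {'no_cuda': True, 'batch_size': 8}
for deprecated_arg in ['no_inference', 'no_cuda', 'no_tpu']:
    if deprecated_arg in kwargs:
        positive_arg = deprecated_arg[3:]  # 'no_cuda' -> 'cuda'
        kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
assert kwargs == {'batch_size': 8, 'cuda': False}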
| 91 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = GPTSanJapaneseTokenizer
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = {'do_clean_text': False, 'add_prefix_space': False}
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# fmt: off
        lowerCamelCase_ = ['ใ“ใ‚“', 'ใ“ใ‚“ใซ', 'ใซใกใฏ', 'ใฐใ‚“ใฏ', 'ไธ–็•Œ,ใบ็•Œ', 'ใ€', 'ใ€‚', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
        lowerCamelCase_ = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}}  # ๐Ÿ˜€
lowerCamelCase_ = {'unk_token': '<unk>'}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(SCREAMING_SNAKE_CASE_ ) )
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
        lowerCamelCase_ = 'ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \nใ“ใ‚“ใฐใ‚“ใฏใ€ใบ็•Œใ€‚๐Ÿ˜€'
        lowerCamelCase_ = 'ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \nใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€'
return input_text, output_text
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.get_input_output_texts(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
return text, ids
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
# Testing tokenization
        lowerCamelCase_ = 'ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ€€ใ“ใ‚“ใฐใ‚“ใฏใ€ใบ็•Œใ€‚'
        lowerCamelCase_ = ['ใ“ใ‚“', 'ใซใกใฏ', 'ใ€', 'ไธ–็•Œ', 'ใ€‚', '<SP>', 'ใ“ใ‚“', 'ใฐใ‚“ใฏ', 'ใ€', 'ใบ็•Œ', 'ใ€‚']
lowerCamelCase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids without special tokens
lowerCamelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Testing conversion to ids with special tokens
lowerCamelCase_ = tokens + [tokenizer.unk_token]
lowerCamelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.get_tokenizer()
# Testing tokenization
        lowerCamelCase_ = 'ใ“ใ‚“ใซใกใฏใ€<|bagoftoken|>ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€<|bagoftoken|>ใบ็•Œใ€‚'
        lowerCamelCase_ = 'ใ“ใ‚“ใซใกใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚'
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
        lowerCamelCase_ = 'ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚'
        lowerCamelCase_ = 'ใ“ใ‚“ใฐใ‚“ใฏใ€ใบ็•Œใ€‚๐Ÿ˜€'
        lowerCamelCase_ = 'ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€'
lowerCamelCase_ = tokenizer.encode(prefix_text + input_text )
lowerCamelCase_ = tokenizer.encode('' , prefix_text=prefix_text + input_text )
lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
        lowerCamelCase_ = 'ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚'
        lowerCamelCase_ = 'ใ“ใ‚“ใฐใ‚“ใฏใ€ใบ็•Œใ€‚๐Ÿ˜€'
lowerCamelCase_ = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2
lowerCamelCase_ = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2
lowerCamelCase_ = [1] + [0] * (len_prefix + len_text + 1)
lowerCamelCase_ = [1] * (len_prefix + len_text + 1) + [0]
lowerCamelCase_ = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCamelCase_ = tokenizer(prefix_text + input_text ).token_type_ids
lowerCamelCase_ = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ ).token_type_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        lowerCamelCase_ = tokenizer.encode('ใ‚ใƒณใƒˆใƒฏ' )
        lowerCamelCase_ = tokenizer.encode('' , prefix_text='ใ‚ใƒณใƒˆใƒฏ' )
        lowerCamelCase_ = tokenizer.encode('ใƒˆใƒฏ' , prefix_text='ใ‚ใƒณ' )
self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        lowerCamelCase_ = [['ๆญฆ็”ฐไฟก็„', 'ใฏใ€'], ['็น”็”ฐไฟก้•ท', 'ใฎ้…ไธ‹ใฎใ€']]
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
# fmt: off
lowerCamelCase_ = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
lowerCamelCase_ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCamelCase_ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token.token_type_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token.attention_mask , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.input_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.token_type_ids , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(x_token_a.attention_mask , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
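# --- Added sketch (not part of the original test module) --------------------
# The prefix mechanism exercised by the tests above, in isolation. Network
# access to the Tanrei/GPTSAN-japanese checkpoint is assumed.
def _demo_prefix_encoding():
    from transformers import GPTSanJapaneseTokenizer

    tokenizer = GPTSanJapaneseTokenizer.from_pretrained('Tanrei/GPTSAN-japanese')
    enc = tokenizer('ใƒˆใƒฏ', prefix_text='ใ‚ใƒณ')
    # token_type_ids are 1 over the prefix and 0 over the completion, which is
    # exactly what the token_type_ids tests above assert.
    return enc.input_ids, enc.token_type_ids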
| 42 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=13 , UpperCAmelCase__ : Any=32 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[str]=[10, 20, 30, 40] , UpperCAmelCase__ : Any=[2, 2, 3, 2] , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[Any]=10 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[int]=["stage2", "stage3", "stage4"] , UpperCAmelCase__ : Dict=[2, 3, 4] , UpperCAmelCase__ : Optional[int]=None , ):
'''simple docstring'''
lowercase : List[Any] =parent
lowercase : Tuple =batch_size
lowercase : List[str] =image_size
lowercase : List[Any] =num_channels
lowercase : Union[str, Any] =num_stages
lowercase : int =hidden_sizes
lowercase : Any =depths
lowercase : Tuple =is_training
lowercase : str =use_labels
lowercase : List[Any] =intermediate_size
lowercase : int =hidden_act
lowercase : Union[str, Any] =num_labels
lowercase : Optional[int] =initializer_range
lowercase : int =out_features
lowercase : List[str] =out_indices
lowercase : str =scope
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase : Dict =None
if self.use_labels:
lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_labels )
lowercase : Dict =self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase : Dict =ConvNextVaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : Optional[Any] =model(UpperCAmelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : Dict =ConvNextVaForImageClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : str =model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : Union[str, Any] =ConvNextVaBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : Optional[int] =model(UpperCAmelCase__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase : Optional[Any] =None
lowercase : str =ConvNextVaBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : Optional[Any] =model(UpperCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Any =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase : str =config_and_inputs
lowercase : Any ={'''pixel_values''': pixel_values}
return config, inputs_dict
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : str =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase : List[str] =config_and_inputs
lowercase : Optional[Any] ={'''pixel_values''': pixel_values, '''labels''': labels}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Dict =ConvNextVaModelTester(self )
lowercase : str =ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase , lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_with_labels()
lowercase : Optional[int] =True
if model_class.__name__ in [
*get_values(UpperCAmelCase__ ),
*get_values(UpperCAmelCase__ ),
]:
continue
lowercase : Dict =model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.train()
lowercase : Optional[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
lowercase : List[Any] =model(**UpperCAmelCase__ ).loss
loss.backward()
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase , lowercase : Any =self.model_tester.prepare_config_and_inputs_with_labels()
lowercase : List[Any] =False
lowercase : Any =True
if (
model_class.__name__
in [*get_values(UpperCAmelCase__ ), *get_values(UpperCAmelCase__ )]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase : Any =model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.gradient_checkpointing_enable()
model.train()
lowercase : Optional[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
lowercase : int =model(**UpperCAmelCase__ ).loss
loss.backward()
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase , lowercase : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Dict =model_class(UpperCAmelCase__ )
lowercase : Union[str, Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : int =[*signature.parameters.keys()]
lowercase : Optional[Any] =['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple ):
lowercase : int =model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
lowercase : Any =model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowercase : Dict =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase : List[Any] =self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase__ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase , lowercase : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : List[str] =True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
lowercase : Tuple =True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : List[Any] =ConvNextVaModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Union[str, Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Tuple =ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(UpperCAmelCase__ )
lowercase : int =self.default_image_processor
lowercase : List[str] =prepare_img()
lowercase : List[Any] =preprocessor(images=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
lowercase : Dict =model(**UpperCAmelCase__ )
# verify the logits
lowercase : Optional[Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
lowercase : Tuple =torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
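# --- Added sketch (not part of the original test module) --------------------
# End-to-end classification inference mirroring the integration test above;
# the checkpoint name is taken from that test, and `_lowerCAmelCase` is the
# image helper defined earlier in this file.
def _demo_convnextva_inference():
    processor = AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224')
    model = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224')
    inputs = processor(images=_lowerCAmelCase(), return_tensors='pt')
    with torch.no_grad():
        logits = model(**inputs).logits
    return logits.argmax(-1).item()  # index into the 1000 ImageNet classes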
| 92 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
A_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Dict:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ = "auto" ) -> List[str]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> List[str]:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = 1
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(SCREAMING_SNAKE_CASE_ )}.''' )
# get prompt text embeddings
lowerCamelCase_ = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
lowerCamelCase_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowerCamelCase_ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowerCamelCase_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = text_embeddings.shape
lowerCamelCase_ = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 )
lowerCamelCase_ = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ = 42
if negative_prompt is None:
lowerCamelCase_ = ['']
elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !='''
f''' {type(SCREAMING_SNAKE_CASE_ )}.''' )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = [negative_prompt]
elif batch_size != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
' the batch size of `prompt`.' )
else:
lowerCamelCase_ = negative_prompt
lowerCamelCase_ = text_input_ids.shape[-1]
lowerCamelCase_ = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , )
lowerCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ = uncond_embeddings.shape[1]
lowerCamelCase_ = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
lowerCamelCase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowerCamelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCamelCase_ = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(self.device )
lowerCamelCase_ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='cpu' , dtype=SCREAMING_SNAKE_CASE_ ).to(
self.device )
else:
lowerCamelCase_ = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowerCamelCase_ = latents_reference.to(self.device )
lowerCamelCase_ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowerCamelCase_ = (latents_shape[3] - latents_shape_reference[3]) // 2
lowerCamelCase_ = (latents_shape[2] - latents_shape_reference[2]) // 2
lowerCamelCase_ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowerCamelCase_ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowerCamelCase_ = 0 if dx < 0 else dx
lowerCamelCase_ = 0 if dy < 0 else dy
lowerCamelCase_ = max(-dx , 0 )
lowerCamelCase_ = max(-dy , 0 )
lowerCamelCase_ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCamelCase_ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase_ = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_eta:
lowerCamelCase_ = eta
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
lowerCamelCase_ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCamelCase_ ,lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 1 / 0.18_215 * latents
lowerCamelCase_ = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
lowerCamelCase_ = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) , return_tensors='pt' ).to(
self.device )
lowerCamelCase_ ,lowerCamelCase_ = self.safety_checker(
images=SCREAMING_SNAKE_CASE_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowerCamelCase_ = None
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
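# --- Added sketch (not part of the original pipeline) -----------------------
# The classifier-free-guidance update from the denoising loop above, in
# isolation: prediction = uncond + scale * (text - uncond). Tensor shapes are
# illustrative only.
def _demo_classifier_free_guidance(guidance_scale: float = 7.5) -> torch.Tensor:
    noise_pred_uncond = torch.randn(1, 4, 64, 64)
    noise_pred_text = torch.randn(1, 4, 64, 64)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)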
| 42 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__A = False
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
pass
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowerCAmelCase__ :Union[str, Any] = torch.manual_seed(0 )
lowerCAmelCase__ :Any = pipe(
image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images
lowerCAmelCase__ :Any = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase__ :List[str] = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
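# --- Added sketch (not part of the original test) ----------------------------
# The closing check is the standard numerical-regression pattern for diffusion
# tests: compare a tiny deterministic slice against recorded values within an
# absolute tolerance, instead of pinning the full 512x512 image.
def _assert_slice_close(actual_slice, expected_slice, atol=1e-2):
    assert np.abs(actual_slice.flatten() - expected_slice.flatten()).max() < atol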
| 93 |
'''simple docstring'''
import pprint
import requests
A_ = "https://zenquotes.io/api"
def _UpperCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def _UpperCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/random' ).json()
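# --- Added sketch (not part of the original snippet) ------------------------
# Defensive variant: the API may rate-limit or fail, so time out and fail
# loudly on non-2xx responses instead of decoding an error body.
def _random_quote_safe() -> list:
    response = requests.get(API_ENDPOINT_URL + '/random', timeout=10)
    response.raise_for_status()
    return response.json()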
if __name__ == "__main__":
A_ = random_quotes()
pprint.pprint(response)
| 42 | 0 |
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser(
description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
)
parser.add_argument(
'--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
)
parser.add_argument(
'--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
)
parser.add_argument('--vocab_size', default=30_522, type=int)
SCREAMING_SNAKE_CASE = parser.parse_args()
logger.info(f"""Loading data from {args.data_file}""")
with open(args.data_file, 'rb') as fp:
SCREAMING_SNAKE_CASE = pickle.load(fp)
logger.info('Counting occurrences for MLM.')
SCREAMING_SNAKE_CASE = Counter()
for tk_ids in data:
counter.update(tk_ids)
SCREAMING_SNAKE_CASE = [0] * args.vocab_size
for k, v in counter.items():
SCREAMING_SNAKE_CASE = v
logger.info(f"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, 'wb') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
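# --- Added sketch (not part of the original script) -------------------------
# Typical downstream use of the dump: turn raw counts into smoothed sampling
# probabilities for MLM masking. The 0.7 smoothing exponent is illustrative,
# not something this script prescribes.
def _smoothed_token_probs(token_counts, smoothing=0.7):
    import numpy as np

    probs = np.maximum(np.array(token_counts, dtype=np.float64), 1) ** -smoothing
    return probs / probs.sum()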
| 94 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=33 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ) -> int:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = EsmModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = EsmForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = EsmForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.prepare_config_and_inputs()
(
(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,
) = config_and_inputs
lowerCamelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = ()
SCREAMING_SNAKE_CASE_ = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = EsmModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase_ = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = EsmModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase_ = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase_ = create_position_ids_from_input_ids(SCREAMING_SNAKE_CASE_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.empty(2 , 4 , 30 )
lowerCamelCase_ = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase_ = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase_ = embeddings.create_position_ids_from_inputs_embeds(SCREAMING_SNAKE_CASE_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
with torch.no_grad():
lowerCamelCase_ = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCamelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
lowerCamelCase_ = 33
lowerCamelCase_ = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@slow
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
lowerCamelCase_ = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCamelCase_ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
# compare the actual values for a slice.
lowerCamelCase_ = torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
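# --- Added sketch (not part of the original test module) --------------------
# The padding-aware position ids the tests above verify, reimplemented in two
# lines: positions count only non-pad tokens and are offset past padding_idx,
# so every pad position stays at padding_idx itself.
def _demo_position_ids(input_ids, padding_idx):
    mask = input_ids.ne(padding_idx).int()
    return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx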
| 42 | 0 |
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : List[Any] = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
UpperCAmelCase_ : int = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(A__ ):
os.makedirs(A__ )
UpperCAmelCase_ : Optional[int] = model.state_dict()
def to_tf_var_name(A__ ):
for patt, repl in iter(A__ ):
UpperCAmelCase_ : Optional[int] = name.replace(A__ ,A__ )
return F"""bert/{name}"""
def create_tf_var(A__ ,A__ ,A__ ):
UpperCAmelCase_ : int = tf.dtypes.as_dtype(tensor.dtype )
UpperCAmelCase_ : List[Any] = tf.get_variable(dtype=A__ ,shape=tensor.shape ,name=A__ ,initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(A__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
UpperCAmelCase_ : Tuple = to_tf_var_name(A__ )
UpperCAmelCase_ : Optional[int] = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
UpperCAmelCase_ : List[str] = torch_tensor.T
UpperCAmelCase_ : List[str] = create_tf_var(tensor=A__ ,name=A__ ,session=A__ )
tf.keras.backend.set_value(A__ ,A__ )
UpperCAmelCase_ : Optional[Any] = session.run(A__ )
print(F"""Successfully created {tf_name}: {np.allclose(A__ ,A__ )}""" )
UpperCAmelCase_ : Tuple = tf.train.Saver(tf.trainable_variables() )
saver.save(A__ ,os.path.join(A__ ,model_name.replace("-" ,"_" ) + ".ckpt" ) )
def snake_case ( A__=None ):
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--model_name" ,type=A__ ,required=A__ ,help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" ,type=A__ ,default=A__ ,required=A__ ,help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" ,type=A__ ,required=A__ ,help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" ,type=A__ ,required=A__ ,help="Directory in which to save tensorflow model" )
UpperCAmelCase_ : str = parser.parse_args(A__ )
UpperCAmelCase_ : Optional[Any] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name ,state_dict=torch.load(args.pytorch_model_path ) ,cache_dir=args.cache_dir ,)
convert_pytorch_checkpoint_to_tf(model=A__ ,ckpt_dir=args.tf_cache_dir ,model_name=args.model_name )
if __name__ == "__main__":
main()
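# --- Added usage note (not part of the original script) ---------------------
# Illustrative invocation; the script filename is an assumption, while the
# flags are the ones defined by the argument parser above:
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt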
| 95 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
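# --- Added sketch (not part of the original __init__) -----------------------
# What the _LazyModule indirection buys: importing the package stays cheap,
# and the torch/tf/flax submodules only load on first attribute access.
def _demo_lazy_import():
    from transformers.models.resnet import ResNetConfig  # config only, no torch yet

    return ResNetConfig()  # instantiating ResNetModel would trigger the torch import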
| 42 | 0 |
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__lowerCamelCase = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
__lowerCamelCase = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
__lowerCamelCase = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
__lowerCamelCase = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
__lowerCamelCase = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
__lowerCamelCase = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
__lowerCamelCase = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def a ( ) -> Any:
__magic_name__, __magic_name__: Optional[int] = randrange(len(__UpperCAmelCase ) ), randrange(len(__UpperCAmelCase ) )
__magic_name__: Optional[int] = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Generate a stream of random hand pairs together with the expected outcome."""
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
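
# A minimal, self-contained sketch of the card-value convention the five-high
# straight tests above rely on (assumption: PokerHand maps T/J/Q/K/A to 10..14;
# the helper name below is hypothetical and not part of the test suite).
def _sketch_card_values(hand: str) -> list[int]:
    order = "23456789TJQKA"
    return sorted((order.index(card[0]) + 2 for card in hand.split()), reverse=True)


# "2C 4S AS 3D 5C" sorts to [14, 5, 4, 3, 2]; _is_five_high_straight then moves
# the ace to the low end, producing the [5, 4, 3, 2, 14] the tests assert.
assert _sketch_card_values("2C 4S AS 3D 5C") == [14, 5, 4, 3, 2]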
| 96 |
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
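
# A short usage sketch (illustrative values, not tuned settings): the derived
# num_hidden_layers property tracks the encoder/decoder depths set above.
if __name__ == "__main__":
    sketch_config = XLMProphetNetConfig(num_encoder_layers=6, num_decoder_layers=6)
    assert sketch_config.num_hidden_layers == 12  # 6 encoder + 6 decoder layers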
| 42 | 0 |
from copy import deepcopy
from typing import Optional, Union

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available


if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )

        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor

    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                # a single original size is broadcast over every set of points
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor

    def _pad_points_and_labels(self, input_points, input_labels):
        # pad every set of points (and its labels) to the largest point count in the batch
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates(
        self, target_size: int, coords: np.ndarray, original_size, is_bounding_box=False
    ) -> np.ndarray:
        # rescale (x, y) coordinates from the original image size to the resized size
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords

    def _check_and_preprocess_points(
        self,
        input_points=None,
        input_labels=None,
        input_boxes=None,
    ):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
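
# Standalone sketch of the rescaling rule _normalize_coordinates applies: the
# longest image edge is scaled to `longest_edge` while preserving the aspect
# ratio (mirroring the assumed behaviour of
# SamImageProcessor._get_preprocess_shape), and point coordinates are scaled
# by the same per-axis ratios. Names and numbers below are illustrative.
def _sketch_preprocess_shape(old_h: int, old_w: int, longest_edge: int) -> tuple:
    scale = longest_edge / max(old_h, old_w)
    return int(old_h * scale + 0.5), int(old_w * scale + 0.5)


if __name__ == "__main__":
    old_h, old_w = 480, 640
    new_h, new_w = _sketch_preprocess_shape(old_h, old_w, 1024)  # (768, 1024)
    x, y = 320.0, 240.0  # a point in the original image
    assert (x * new_w / old_w, y * new_h / old_h) == (512.0, 384.0)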
| 97 |
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
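
# Two quick hand-checked values for get_distance: the origin never diverges,
# so its normalized distance is exactly 1.0, while (2, 2) escapes on the very
# first step and maps to 0.0.
if __name__ == "__main__":
    assert get_distance(0, 0, 50) == 1.0
    assert get_distance(2, 2, 50) == 0.0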
| 42 | 0 |
from __future__ import annotations

import copy
import tempfile
import unittest

from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tensorflow_probability,
    require_tf,
    slow,
)

from ..bert.test_modeling_bert import BertModelTester


if is_tf_available():
    from transformers import (
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTableQuestionAnswering,
        TFAutoModelForTokenClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFFunnelBaseModel,
        TFFunnelModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
        TFTapasForQuestionAnswering,
    )
    from transformers.models.auto.modeling_tf_auto import (
        TF_MODEL_FOR_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_MASKED_LM_MAPPING,
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        TF_MODEL_MAPPING,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST


class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig


@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)

    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)

        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            model = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")

    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
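
# A minimal sketch of the registration flow the test above exercises (run
# directly, outside pytest; NewModelConfig/TFNewModel are the classes defined
# at the top of this file, and the tiny dimensions below are illustrative).
if __name__ == "__main__":
    AutoConfig.register("new-model", NewModelConfig)
    TFAutoModel.register(NewModelConfig, TFNewModel)
    sketch_config = NewModelConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64)
    sketch_model = TFAutoModel.from_config(sketch_config)
    print(type(sketch_model).__name__)  # TFNewModel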
| 98 |
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal through which the beam is reflected
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
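
# A small driver sketch: the problem's initial beam travels from (0.0, 10.1)
# to (1.4, -9.6), giving the starting gradient below; next_point then returns
# the next wall contact and the reflected gradient, which solution() iterates
# until the beam exits near the top gap (|x| <= 0.01, y > 0).
if __name__ == "__main__":
    first_gradient = (10.1 - -9.6) / (0.0 - 1.4)
    print(next_point(1.4, -9.6, first_gradient))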
| 42 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
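
# A short usage sketch (illustrative override): a nonzero projection_dim asks
# the DPR encoders to project their pooled output down to that dimension,
# while zero leaves the pooled output unprojected.
if __name__ == "__main__":
    sketch_config = DPRConfig(projection_dim=128)
    print(sketch_config.hidden_size, sketch_config.projection_dim)  # 768 128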
| 99 |
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin primality check up to the analysed bound;
    above it, allow_probable=True opts in to a probabilistic answer."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True


def test_miller_rabin() -> None:
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


if __name__ == "__main__":
    test_miller_rabin()
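
# Quick usage sketch (values chosen to stay inside the deterministic bound):
# a small prime, a Carmichael number that naive Fermat tests miss, and the
# Mersenne prime 2**61 - 1.
if __name__ == "__main__":
    print(miller_rabin(97))         # True: prime
    print(miller_rabin(561))        # False: 561 = 3 * 11 * 17 (Carmichael number)
    print(miller_rabin(2**61 - 1))  # True: a Mersenne prime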
| 42 | 0 |
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
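
# Usage sketch, hand-traced: with max_perimeter=1_000 the loop accumulates the
# perimeters 16, 50, 196 and 722 before the next candidate (2_704) exceeds the
# bound, so the call below returns 984.
if __name__ == "__main__":
    assert solution(1_000) == 984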
| 100 |
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
            return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}

    training_function(config, args)


if __name__ == "__main__":
    main()
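
# Typical invocations (a sketch; the script name and paths are illustrative).
# The script is meant to be launched twice: once to write per-epoch
# checkpoints, then again to resume from one of them and verify the restored
# optimizer/scheduler state:
#
#   accelerate launch test_checkpointing.py --output_dir ckpts --num_epochs 2
#   accelerate launch test_checkpointing.py --output_dir ckpts --resume_from_checkpoint ckpts/epoch_0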
| 42 | 0 |