| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86–54.5k chars) | int64 (0–371) | string (87–49.2k chars) | int64 (0–349) | int64 (0–1) |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys

fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
    ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
    assert_all_frozen,
    calculate_bleu,
    calculate_rouge,
    check_output_dir,
    flatten_list,
    freeze_embeds,
    freeze_params,
    get_git_info,
    label_smoothed_nll_loss,
    lmap,
    pickle_save,
    save_git_info,
    save_json,
    use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)


class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility"""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
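    # Note on the label-smoothing branch above: with smoothing epsilon, the loss is
    # a blend of the gold-token NLL and a uniform penalty over the vocabulary, with
    # pad positions ignored. A minimal sketch of the idea (not the exact utils
    # implementation):
    #
    #   lprobs = log_softmax(logits, dim=-1)              # (batch * seq, vocab)
    #   nll = -lprobs.gather(-1, target.unsqueeze(-1))    # gold-token log-prob
    #   smooth = -lprobs.sum(dim=-1, keepdim=True) / lprobs.size(-1)
    #   loss = (1 - eps) * nll + eps * smooth             # then mask pads and reduce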
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
A__ = pl.Trainer.add_argparse_args(parser)
A__ = SummarizationModule.add_model_specific_args(parser, os.getcwd())
A__ = parser.parse_args()
main(args)
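# A hypothetical invocation (model name and paths are placeholders, not taken from
# this file; the generic flags come from lightning_base's argument parsers):
#
#   $ python finetune.py \
#       --model_name_or_path sshleifer/distilbart-cnn-12-6 \
#       --data_dir ./cnn_dm --output_dir ./out \
#       --do_train --do_predict --gpus 1 \
#       --max_source_length 1024 --max_target_length 56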
from pathlib import Path

import numpy as np
from PIL import Image


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Return a gray image computed from an RGB image."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Return a binary image computed from a gray image."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Return the morphological dilation of a binary image by the given kernel."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
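# A quick sanity check on a toy binary image (values chosen purely for
# illustration): dilating a single centre pixel by a cross-shaped kernel
# should reproduce the cross.
#
#   >>> img = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
#   >>> cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
#   >>> dilation(img, cross)
#   array([[0, 1, 0],
#          [1, 1, 1],
#          [0, 1, 0]])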
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
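# Expected behaviour, using known Project Euler 25 values:
#   solution(3)    -> 12    (F(12) = 144 is the first 3-digit term)
#   solution(1000) -> 4782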
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
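# A hypothetical usage sketch (requires the sentencepiece and jieba packages;
# the checkpoint name is the one referenced in PRETRAINED_VOCAB_FILES_MAP above):
#
#   from transformers import CpmTokenizer
#   tok = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tok("你好，世界")["input_ids"]
#   print(tok.decode(ids))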
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path

import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized

from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput

if is_torch_available():
    from transformers import Wav2Vec2ForCTC
@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_missing_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)

        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)

    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))
    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, lm_score_boundary)

    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])

    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
lowerCamelCase = logging.getLogger(__name__)
class lowercase__ :
'''simple docstring'''
def __init__( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = False
def lowercase__ ( self : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
if not self.initialized:
UpperCAmelCase_ = RagRetriever(
_UpperCAmelCase , question_encoder_tokenizer=_UpperCAmelCase , generator_tokenizer=_UpperCAmelCase , index=_UpperCAmelCase , init_retrieval=_UpperCAmelCase , )
UpperCAmelCase_ = True
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
self.retriever.index.init_index()
def lowercase__ ( self : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.retriever._main_retrieve(_UpperCAmelCase , _UpperCAmelCase )
return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")

        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
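# A hypothetical wiring sketch (actor count is illustrative, not from this file):
# the training script creates the remote RayRetriever actors first, then hands
# their handles to the distributed retriever.
#
#   import ray
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(4)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", actor_handles=workers
#   )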
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def lowercase__ ( self : Tuple ) -> Any:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase__ ( self : List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = ort.SessionOptions()
UpperCAmelCase_ = False
return options
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy" )
# using the PNDM scheduler by default
UpperCAmelCase_ = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = "A red cat sitting on a park bench"
UpperCAmelCase_ = np.random.RandomState(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=_UpperCAmelCase , output_type="np" , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-2
import inspect
import os
import unittest
from pathlib import Path

import torch

import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command


class AccelerateLauncherTester(unittest.TestCase):
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
a__ = '''test-tpu'''
a__ = '''us-central1-a'''
a__ = '''ls'''
a__ = ['''accelerate''', '''tpu-config''']
a__ = '''cd /usr/share'''
a__ = '''tests/test_samples/test_command_file.sh'''
a__ = '''Running gcloud compute tpus tpu-vm ssh'''
def _lowercase ( self : Optional[Any] ) -> str:
"""simple docstring"""
__magic_name__ = run_command(
self.cmd
+ ["""--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug"""] , return_stdout=UpperCamelCase__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , UpperCamelCase__ , )
def _lowercase ( self : Dict ) -> Any:
"""simple docstring"""
__magic_name__ = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/0_12_0.yaml""",
"""--command""",
self.command,
"""--tpu_zone""",
self.tpu_zone,
"""--tpu_name""",
self.tpu_name,
"""--debug""",
] , return_stdout=UpperCamelCase__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , UpperCamelCase__ , )
def _lowercase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__magic_name__ = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--debug"""] , return_stdout=UpperCamelCase__ )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all''' , UpperCamelCase__ , )
def _lowercase ( self : List[str] ) -> List[str]:
"""simple docstring"""
__magic_name__ = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command""", self.command, """--debug"""] , return_stdout=UpperCamelCase__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , UpperCamelCase__ , )
def _lowercase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__magic_name__ = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/latest.yaml""",
"""--command""",
self.command,
"""--command""",
"""echo \"Hello World\"""",
"""--debug""",
] , return_stdout=UpperCamelCase__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all''' , UpperCamelCase__ , )
def _lowercase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = run_command(
self.cmd
+ ["""--config_file""", """tests/test_configs/latest.yaml""", """--command_file""", self.command_file, """--debug"""] , return_stdout=UpperCamelCase__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all''' , UpperCamelCase__ , )
def _lowercase ( self : Optional[Any] ) -> str:
"""simple docstring"""
__magic_name__ = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/0_12_0.yaml""",
"""--command_file""",
self.command_file,
"""--tpu_zone""",
self.tpu_zone,
"""--tpu_name""",
self.tpu_name,
"""--debug""",
] , return_stdout=UpperCamelCase__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all''' , UpperCamelCase__ , )
def _lowercase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--debug"""] , return_stdout=UpperCamelCase__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all''' , UpperCamelCase__ , )
def _lowercase ( self : Optional[Any] ) -> str:
"""simple docstring"""
__magic_name__ = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/latest.yaml""",
"""--install_accelerate""",
"""--accelerate_version""",
"""12.0.0""",
"""--debug""",
] , return_stdout=UpperCamelCase__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all''' , UpperCamelCase__ , )
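# A minimal standalone sketch of how the asserted command strings above are
# assembled; `build_tpu_command` and its parameters are illustrative names,
# not accelerate's actual API.
def build_tpu_command(tpu_name, zone, commands, base="cd /usr/share"):
    joined = "; ".join([base] + list(commands))
    return f"gcloud compute tpus tpu-vm ssh {tpu_name} --zone {zone} --command {joined} --worker all"

assert "cd /usr/share; ls" in build_tpu_command("test-tpu", "us-central1-a", ["ls"])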
| 88 |
'''simple docstring'''
def lowercase__( __UpperCamelCase: str ):
"""simple docstring"""
    SCREAMING_SNAKE_CASE : Optional[int] = [int(i ) for i in ip_va_address.split('.' ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= octet <= 2_55 for octet in octets )
if __name__ == "__main__":
UpperCamelCase_ = input().strip()
UpperCamelCase_ = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(F"""{ip} is a {valid_or_invalid} IP v4 address.""")
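# A self-contained restatement of the validator above using the standard 0-255
# octet range; `_is_ipv4` is an illustrative helper name.
def _is_ipv4(address: str) -> bool:
    parts = address.split(".")
    return len(parts) == 4 and all(p.isdigit() and 0 <= int(p) <= 255 for p in parts)

assert _is_ipv4("192.168.0.1")
assert not _is_ipv4("256.1.1.1")  # octet above 255
assert not _is_ipv4("1.2.3")  # too few octets
assert not _is_ipv4("1.2.3.x")  # non-numeric octet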
| 251 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'nielsr/canine-s': 2_0_4_8,
}
# Unicode defines 1,114,112 total “codepoints”
UpperCamelCase__ = 1_1_1_4_1_1_2
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
UpperCamelCase__ = 0
UpperCamelCase__ = 0Xe0_00
UpperCamelCase__ = 0Xe0_01
UpperCamelCase__ = 0Xe0_02
UpperCamelCase__ = 0Xe0_03
UpperCamelCase__ = 0Xe0_04
# Maps special codepoints to human-readable names.
UpperCamelCase__ = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: '[CLS]',
SEP: '[SEP]',
BOS: '[BOS]',
MASK: '[MASK]',
PAD: '[PAD]',
RESERVED: '[RESERVED]',
}
# Maps special codepoint human-readable names to their codepoint values.
UpperCamelCase__ = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self : List[str] , __UpperCAmelCase : int=chr(CLS ) , __UpperCAmelCase : int=chr(SEP ) , __UpperCAmelCase : Dict=chr(SEP ) , __UpperCAmelCase : int=chr(CLS ) , __UpperCAmelCase : Optional[int]=chr(PAD ) , __UpperCAmelCase : Any=chr(MASK ) , __UpperCAmelCase : Tuple=False , __UpperCAmelCase : Optional[Any]=2_0_4_8 , **__UpperCAmelCase : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else bos_token
UpperCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token
UpperCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else sep_token
UpperCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cls_token
UpperCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , model_max_length=__UpperCAmelCase , **__UpperCAmelCase , )
# Creates a mapping for looking up the IDs of special symbols.
UpperCAmelCase__ = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
UpperCAmelCase__ = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
UpperCAmelCase__ = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
UpperCAmelCase__ = UNICODE_VOCAB_SIZE
UpperCAmelCase__ = len(self._special_codepoints )
@property
def lowercase_ (self : Dict ) -> int:
"""simple docstring"""
return self._unicode_vocab_size
def lowercase_ (self : Optional[Any] , __UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
return list(__UpperCAmelCase )
def lowercase_ (self : List[Any] , __UpperCAmelCase : str ) -> int:
"""simple docstring"""
try:
return ord(__UpperCAmelCase )
except TypeError:
raise ValueError(f"""invalid token: '{token}'""" )
def lowercase_ (self : int , __UpperCAmelCase : int ) -> str:
"""simple docstring"""
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(__UpperCAmelCase )
except TypeError:
raise ValueError(f"""invalid id: {index}""" )
def lowercase_ (self : List[str] , __UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
return "".join(__UpperCAmelCase )
def lowercase_ (self : List[str] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
UpperCAmelCase__ = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def lowercase_ (self : Dict , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
UpperCAmelCase__ = [1] + ([0] * len(__UpperCAmelCase )) + [1]
if token_ids_a is not None:
result += ([0] * len(__UpperCAmelCase )) + [1]
return result
def lowercase_ (self : int , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
UpperCAmelCase__ = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def lowercase_ (self : str , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) -> Any:
"""simple docstring"""
return ()
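# A standalone sketch of the codepoint-level scheme the tokenizer above relies
# on: token ids are plain ord() values and special tokens sit in the Unicode
# private-use area; the two constants mirror the CLS/SEP values defined earlier.
CLS_ID, SEP_ID = 0xE000, 0xE001

def encode_chars(text):
    return [CLS_ID] + [ord(ch) for ch in text] + [SEP_ID]

def decode_chars(ids):
    return "".join(chr(i) for i in ids if i not in (CLS_ID, SEP_ID))

ids = encode_chars("hi")
assert ids == [0xE000, 104, 105, 0xE001]
assert decode_chars(ids) == "hi"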
| 363 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 143 | 0 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_snake_case = logging.get_logger(__name__)
class lowercase ( UpperCamelCase__ ):
def __init__( self , *_a , **_a ) -> None:
        warnings.warn(
            """The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DonutImageProcessor instead.""" , FutureWarning , )
super().__init__(*_a , **_a )
| 26 |
def lowerCAmelCase_ ( snake_case_ ):
if n_term == "":
return []
_A : list = []
for temp in range(int(snake_case_ ) ):
series.append(f'''1/{temp + 1}''' if series else """1""" )
return series
if __name__ == "__main__":
_snake_case = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
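# The numeric counterpart of the string series printed above: the n-th partial
# sum of the harmonic series 1 + 1/2 + ... + 1/n.
def harmonic_sum(n):
    return sum(1 / k for k in range(1, n + 1))

assert harmonic_sum(1) == 1.0
assert abs(harmonic_sum(4) - (1 + 1 / 2 + 1 / 3 + 1 / 4)) < 1e-12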
| 26 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : Tuple = LDMTextToImagePipeline
UpperCamelCase__ : Optional[Any] = TEXT_TO_IMAGE_PARAMS - {
'''negative_prompt''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
'''prompt_embeds''',
}
UpperCamelCase__ : Dict = PipelineTesterMixin.required_optional_params - {
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
UpperCamelCase__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase__ : Any = False
def _A ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
__SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
__SCREAMING_SNAKE_CASE = CLIPTextModel(_A )
__SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def _A ( self , _A , _A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
__SCREAMING_SNAKE_CASE = torch.manual_seed(_A )
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=_A ).manual_seed(_A )
__SCREAMING_SNAKE_CASE = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = LDMTextToImagePipeline(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(_A )
__SCREAMING_SNAKE_CASE = pipe(**_A ).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
__SCREAMING_SNAKE_CASE = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _A ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self , _A , _A=torch.floataa , _A=0 ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = torch.manual_seed(_A )
__SCREAMING_SNAKE_CASE = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
__SCREAMING_SNAKE_CASE = torch.from_numpy(_A ).to(device=_A , dtype=_A )
__SCREAMING_SNAKE_CASE = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
__SCREAMING_SNAKE_CASE = self.get_inputs(_A )
__SCREAMING_SNAKE_CASE = pipe(**_A ).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
__SCREAMING_SNAKE_CASE = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
__SCREAMING_SNAKE_CASE = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _A ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self , _A , _A=torch.floataa , _A=0 ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = torch.manual_seed(_A )
__SCREAMING_SNAKE_CASE = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
__SCREAMING_SNAKE_CASE = torch.from_numpy(_A ).to(device=_A , dtype=_A )
__SCREAMING_SNAKE_CASE = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
__SCREAMING_SNAKE_CASE = self.get_inputs(_A )
__SCREAMING_SNAKE_CASE = pipe(**_A ).images[0]
__SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
__SCREAMING_SNAKE_CASE = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
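# A standalone version of the seeding pattern used in get_dummy_inputs/get_inputs
# above (assumes torch is installed): MPS lacks per-device generators, hence the
# global-seed fallback.
import torch

def seeded_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

a = torch.randn(3, generator=seeded_generator("cpu", 42))
b = torch.randn(3, generator=seeded_generator("cpu", 42))
assert torch.equal(a, b)  # identical seeds give identical draws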
| 362 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
lowerCAmelCase__ : Optional[int] =True
from torch.cuda.amp import autocast
lowerCAmelCase__ : List[Any] =logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
UpperCamelCase__ : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase__ : Optional[str] = field(
default=UpperCamelCase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase__ : Optional[bool] = field(
default=UpperCamelCase_ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
UpperCamelCase__ : Optional[bool] = field(
default=UpperCamelCase_ , metadata={'''help''': '''Whether to log verbose messages or not.'''} , )
UpperCamelCase__ : Optional[float] = field(
default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} )
UpperCamelCase__ : Optional[float] = field(
default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} )
UpperCamelCase__ : Optional[float] = field(
default=0.9_9_9_9_9_5 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} )
def __lowercase ( a__ , a__ ) -> Dict:
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
__SCREAMING_SNAKE_CASE = logging.WARNING
if model_args.verbose_logging:
__SCREAMING_SNAKE_CASE = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
__SCREAMING_SNAKE_CASE = logging.INFO
logger.setLevel(a__ )
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
UpperCamelCase__ : str = field(
default=UpperCamelCase_ , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase__ : Optional[str] = field(
default=UpperCamelCase_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase__ : Optional[str] = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
UpperCamelCase__ : Optional[str] = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
UpperCamelCase__ : Optional[str] = field(
default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. Defaults to \'file\''''} , )
UpperCamelCase__ : bool = field(
default=UpperCamelCase_ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
UpperCamelCase__ : Optional[int] = field(
default=1 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
UpperCamelCase__ : Optional[int] = field(
default=UpperCamelCase_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCamelCase__ : Optional[float] = field(
default=2_0.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} )
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
UpperCamelCase__ : WavaVecaForPreTraining
UpperCamelCase__ : WavaVecaFeatureExtractor
UpperCamelCase__ : Union[bool, str] = "longest"
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Optional[int] = None
def __call__( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.feature_extractor.pad(
_A , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
__SCREAMING_SNAKE_CASE = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1] )
__SCREAMING_SNAKE_CASE = batch['input_values'].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
__SCREAMING_SNAKE_CASE = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1 ) ).to(
torch.long )
__SCREAMING_SNAKE_CASE = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['input_values'].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
__SCREAMING_SNAKE_CASE = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=_A , min_masks=2 , )
return batch
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
def __init__( self , *_A , _A=1 , _A=0 , _A=1.0 , **_A ):
'''simple docstring'''
super().__init__(*_A , **_A )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = max_gumbel_temp
__SCREAMING_SNAKE_CASE = min_gumbel_temp
__SCREAMING_SNAKE_CASE = gumbel_temp_decay
def _A ( self , _A , _A ):
'''simple docstring'''
model.train()
__SCREAMING_SNAKE_CASE = self._prepare_inputs(_A )
if self.use_amp:
with autocast():
__SCREAMING_SNAKE_CASE = self.compute_loss(_A , _A )
else:
__SCREAMING_SNAKE_CASE = self.compute_loss(_A , _A )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
__SCREAMING_SNAKE_CASE = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__SCREAMING_SNAKE_CASE = loss.sum() / (inputs['mask_time_indices']).sum()
else:
raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
__SCREAMING_SNAKE_CASE = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(_A ).backward()
elif self.use_apex:
with amp.scale_loss(_A , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(_A )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def __lowercase ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
configure_logger(a__ , a__ )
# Downloading and loading a dataset from the hub.
__SCREAMING_SNAKE_CASE = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
__SCREAMING_SNAKE_CASE = DatasetDict()
__SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""" , cache_dir=model_args.cache_dir , )
__SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""" , cache_dir=model_args.cache_dir , )
else:
        # make sure only "validation" and "train" keys remain
__SCREAMING_SNAKE_CASE = DatasetDict()
__SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split='validation' , cache_dir=model_args.cache_dir , )
__SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}""" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
__SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=a__ )
    def prepare_dataset(batch ):
# check that all files have the correct sampling rate
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
__SCREAMING_SNAKE_CASE = datasets.map(
a__ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['train'].column_names )
# filter audio files that are too long
__SCREAMING_SNAKE_CASE = vectorized_datasets.filter(
        lambda data : len(data['speech'] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(batch ):
return feature_extractor(batch['speech'] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
__SCREAMING_SNAKE_CASE = vectorized_datasets.map(
a__ , batched=a__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['train'].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
__SCREAMING_SNAKE_CASE = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'
' ``config.feat_extract_norm=\'layer\'' )
__SCREAMING_SNAKE_CASE = WavaVecaForPreTraining(a__ )
__SCREAMING_SNAKE_CASE = DataCollatorForWavaVecaPretraining(model=a__ , feature_extractor=a__ )
__SCREAMING_SNAKE_CASE = WavaVecaPreTrainer(
model=a__ , data_collator=a__ , args=a__ , train_dataset=vectorized_datasets['train'] , eval_dataset=vectorized_datasets['validation'] , tokenizer=a__ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
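# The temperature schedule enforced in training_step above, checked standalone:
# the Gumbel-softmax temperature decays geometrically but never drops below the
# configured floor.
def gumbel_temperature(step, max_temp=2.0, min_temp=0.5, decay=0.999995):
    return max(max_temp * decay**step, min_temp)

assert gumbel_temperature(0) == 2.0
assert gumbel_temperature(1) < gumbel_temperature(0)
assert gumbel_temperature(10**7) == 0.5  # floor kicks in for large step counts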
| 118 | 0 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def UpperCamelCase_( snake_case : int ):
'''simple docstring'''
snake_case_ = tmp_path / "file.csv"
snake_case_ = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20\n " )
with open(snake_case , "w" ) as f:
f.write(snake_case )
return str(snake_case )
@pytest.fixture
def UpperCamelCase_( snake_case : Dict ):
'''simple docstring'''
snake_case_ = tmp_path / "malformed_file.csv"
snake_case_ = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20,\n " )
with open(snake_case , "w" ) as f:
f.write(snake_case )
return str(snake_case )
@pytest.fixture
def UpperCamelCase_( snake_case : Optional[Any] , snake_case : Tuple ):
'''simple docstring'''
snake_case_ = tmp_path / "csv_with_image.csv"
snake_case_ = textwrap.dedent(
f'\\n image\n {image_file}\n ' )
with open(snake_case , "w" ) as f:
f.write(snake_case )
return str(snake_case )
@pytest.fixture
def UpperCamelCase_( snake_case : int ):
'''simple docstring'''
snake_case_ = tmp_path / "csv_with_label.csv"
snake_case_ = textwrap.dedent(
"\\n label\n good\n bad\n good\n " )
with open(snake_case , "w" ) as f:
f.write(snake_case )
return str(snake_case )
@pytest.fixture
def UpperCamelCase_( snake_case : Dict ):
'''simple docstring'''
snake_case_ = tmp_path / "csv_with_int_list.csv"
snake_case_ = textwrap.dedent(
"\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " )
with open(snake_case , "w" ) as f:
f.write(snake_case )
return str(snake_case )
def UpperCamelCase_( snake_case : Optional[Any] , snake_case : Any , snake_case : Optional[Any] ):
'''simple docstring'''
snake_case_ = Csv()
snake_case_ = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(snake_case , match="Error tokenizing data" ):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(snake_case ) in record.message
for record in caplog.records )
@require_pil
def UpperCamelCase_( snake_case : int ):
'''simple docstring'''
with open(snake_case , encoding="utf-8" ) as f:
snake_case_ = f.read().splitlines()[1]
snake_case_ = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
snake_case_ = csv._generate_tables([[csv_file_with_image]] )
snake_case_ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("image" ).type == Image()()
snake_case_ = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
with open(snake_case , encoding="utf-8" ) as f:
snake_case_ = f.read().splitlines()[1:]
snake_case_ = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
snake_case_ = csv._generate_tables([[csv_file_with_label]] )
snake_case_ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
snake_case_ = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"] ).straint(label ) for label in labels]
def UpperCamelCase_( snake_case : Any ):
'''simple docstring'''
    snake_case_ = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda x : [int(i ) for i in x.split()]} )
snake_case_ = csv._generate_tables([[csv_file_with_int_list]] )
snake_case_ = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
snake_case_ = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
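# The converter from the last test, shown standalone: it turns a
# whitespace-separated cell into a list of ints, which pyarrow then infers as a
# list-typed column.
def parse_int_list(cell):
    return [int(tok) for tok in cell.split()]

assert parse_int_list("1 2 3") == [1, 2, 3]
assert parse_int_list("7 8 9") == [7, 8, 9]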
| 85 |
"""simple docstring"""
import warnings
warnings.warn(
    """memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: """
    """`from accelerate import find_executable_batch_size` to avoid this warning.""",
    FutureWarning,
)
| 289 | 0 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowercase_ = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict=False , lowerCAmelCase__ : Union[str, Any]=True ) -> Union[str, Any]:
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
__a = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
__a = cached_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , force_download=not use_cached_models )
__a = config_class.from_json_file(SCREAMING_SNAKE_CASE__ )
__a = True
__a = True
print(f'''Building TensorFlow model from configuration: {config}''' )
__a = model_class(SCREAMING_SNAKE_CASE__ )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
__a = cached_file(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
__a = load_pytorch_checkpoint_in_tfa_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if compare_with_pt_model:
__a = tf_model(tf_model.dummy_inputs , training=SCREAMING_SNAKE_CASE__ ) # build the network
__a = torch.load(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )
__a = pt_model_class.from_pretrained(
pretrained_model_name_or_path=SCREAMING_SNAKE_CASE__ , config=SCREAMING_SNAKE_CASE__ , state_dict=SCREAMING_SNAKE_CASE__ )
with torch.no_grad():
__a = pt_model(**pt_model.dummy_inputs )
__a = pto[0].numpy()
__a = tfo[0].numpy()
__a = np.amax(np.abs(np_pt - np_tf ) )
print(f'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}'''
# Save pytorch-model
print(f'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(SCREAMING_SNAKE_CASE__ , save_format='''h5''' )
def lowercase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Dict=False , lowerCAmelCase__ : int=False , lowerCAmelCase__ : Optional[Any]=False , ) -> List[str]:
if args_model_type is None:
__a = list(MODEL_CLASSES.keys() )
else:
__a = [args_model_type]
for j, model_type in enumerate(SCREAMING_SNAKE_CASE__ , start=1 ):
print('''=''' * 100 )
print(f''' Converting model type {j}/{len(SCREAMING_SNAKE_CASE__ )}: {model_type}''' )
print('''=''' * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
__a = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
__a = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
__a = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , start=1 ):
print('''-''' * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
__a = model_shortcut_name
elif only_convert_finetuned_models:
print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
f''' Converting checkpoint {i}/{len(SCREAMING_SNAKE_CASE__ )}: {model_shortcut_name} - model_type {model_type}''' )
print('''-''' * 100 )
if config_shortcut_name in aws_config_map:
__a = cached_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , force_download=not use_cached_models )
else:
__a = config_shortcut_name
if model_shortcut_name in aws_model_maps:
__a = cached_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , force_download=not use_cached_models )
else:
__a = model_shortcut_name
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
__a = '''converted_model'''
convert_pt_checkpoint_to_tf(
model_type=SCREAMING_SNAKE_CASE__ , pytorch_checkpoint_path=SCREAMING_SNAKE_CASE__ , config_file=SCREAMING_SNAKE_CASE__ , tf_dump_path=os.path.join(SCREAMING_SNAKE_CASE__ , model_shortcut_name + '''-tf_model.h5''' ) , compare_with_pt_model=SCREAMING_SNAKE_CASE__ , )
if remove_cached_files:
os.remove(SCREAMING_SNAKE_CASE__ )
os.remove(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
lowercase_ = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
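# The numerical equivalence check at the heart of the converter above, shown
# standalone (assumes numpy): outputs agree when the max absolute difference
# stays under the 2e-2 tolerance.
import numpy as np

def max_abs_diff(a, b):
    return float(np.amax(np.abs(np.asarray(a) - np.asarray(b))))

assert max_abs_diff([0.10, 0.20, 0.30], [0.10, 0.21, 0.30]) <= 2e-2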
| 366 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = 'rwkv'
__UpperCAmelCase : Optional[Any] = {'max_position_embeddings': 'context_length'}
def __init__( self , _a=50_277 , _a=1_024 , _a=4_096 , _a=32 , _a=None , _a=None , _a=1E-5 , _a=0 , _a=0 , _a=6 , _a=False , _a=True , **_a , ):
__a = vocab_size
__a = context_length
__a = hidden_size
__a = num_hidden_layers
__a = attention_hidden_size if attention_hidden_size is not None else hidden_size
__a = intermediate_size if intermediate_size is not None else 4 * hidden_size
__a = layer_norm_epsilon
__a = rescale_every
__a = use_cache
__a = bos_token_id
__a = eos_token_id
super().__init__(
tie_word_embeddings=_a , bos_token_id=_a , eos_token_id=_a , **_a )
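# A minimal mock of the derived defaults in the config above:
# attention_hidden_size falls back to hidden_size and intermediate_size to
# 4 * hidden_size when left unset. `TinyRwkvLikeConfig` is illustrative only.
class TinyRwkvLikeConfig:
    def __init__(self, hidden_size=1024, attention_hidden_size=None, intermediate_size=None):
        self.hidden_size = hidden_size
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size

cfg = TinyRwkvLikeConfig()
assert (cfg.attention_hidden_size, cfg.intermediate_size) == (1024, 4096)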
| 11 | 0 |
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
lowercase_ = """scheduler_config.json"""
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = 1
UpperCamelCase = 2
UpperCamelCase = 3
UpperCamelCase = 4
UpperCamelCase = 5
UpperCamelCase = 6
UpperCamelCase = 7
UpperCamelCase = 8
UpperCamelCase = 9
UpperCamelCase = 10
UpperCamelCase = 11
UpperCamelCase = 12
UpperCamelCase = 13
UpperCamelCase = 14
@dataclass
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = 42
class a_ :
'''simple docstring'''
UpperCamelCase = SCHEDULER_CONFIG_NAME
UpperCamelCase = []
UpperCamelCase = True
@classmethod
def snake_case_( cls , A = None , A = None , A=False , **A , ) -> List[Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = cls.load_config(
pretrained_model_name_or_path=A , subfolder=A , return_unused_kwargs=A , return_commit_hash=A , **A , )
return cls.from_config(A , return_unused_kwargs=A , **A )
def snake_case_( self , A , A = False , **A ) -> List[Any]:
self.save_config(save_directory=A , push_to_hub=A , **A )
@property
def snake_case_( self ) -> List[str]:
return self._get_compatibles()
@classmethod
def snake_case_( cls ) -> Dict:
_SCREAMING_SNAKE_CASE = list(set([cls.__name__] + cls._compatibles ) )
_SCREAMING_SNAKE_CASE = importlib.import_module(__name__.split(""".""" )[0] )
_SCREAMING_SNAKE_CASE = [
getattr(A , A ) for c in compatible_classes_str if hasattr(A , A )
]
return compatible_classes
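# A standalone analogue of the compatibility lookup above: class-name strings
# are resolved back to classes in the importing module's namespace; here
# globals() stands in for the imported package root module.
class SchedA:
    _compatibles = ["SchedB"]

class SchedB:
    _compatibles = ["SchedA"]

def compatibles(cls):
    names = list(set([cls.__name__] + cls._compatibles))
    return [globals()[name] for name in names if name in globals()]

assert set(compatibles(SchedA)) == {SchedA, SchedB}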
| 58 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : Any = '''xmod'''
def __init__( self ,SCREAMING_SNAKE_CASE__=3_05_22 ,SCREAMING_SNAKE_CASE__=7_68 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=30_72 ,SCREAMING_SNAKE_CASE__="gelu" ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=5_12 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=1E-12 ,SCREAMING_SNAKE_CASE__=1 ,SCREAMING_SNAKE_CASE__=0 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__="absolute" ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=("en_XX",) ,SCREAMING_SNAKE_CASE__=None ,**SCREAMING_SNAKE_CASE__ ,) -> Optional[Any]:
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ ,bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = vocab_size
__SCREAMING_SNAKE_CASE :List[Any] = hidden_size
__SCREAMING_SNAKE_CASE :List[str] = num_hidden_layers
__SCREAMING_SNAKE_CASE :List[str] = num_attention_heads
__SCREAMING_SNAKE_CASE :Optional[int] = hidden_act
__SCREAMING_SNAKE_CASE :Tuple = intermediate_size
__SCREAMING_SNAKE_CASE :Dict = hidden_dropout_prob
__SCREAMING_SNAKE_CASE :str = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE :Optional[Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE :Optional[Any] = type_vocab_size
__SCREAMING_SNAKE_CASE :str = initializer_range
__SCREAMING_SNAKE_CASE :List[Any] = layer_norm_eps
__SCREAMING_SNAKE_CASE :Optional[Any] = position_embedding_type
__SCREAMING_SNAKE_CASE :Any = use_cache
__SCREAMING_SNAKE_CASE :List[str] = classifier_dropout
__SCREAMING_SNAKE_CASE :Any = pre_norm
__SCREAMING_SNAKE_CASE :Dict = adapter_reduction_factor
__SCREAMING_SNAKE_CASE :Dict = adapter_layer_norm
__SCREAMING_SNAKE_CASE :Dict = adapter_reuse_layer_norm
__SCREAMING_SNAKE_CASE :Tuple = ln_before_adapter
__SCREAMING_SNAKE_CASE :Any = list(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[Any] = default_language
class _SCREAMING_SNAKE_CASE( A ):
@property
def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
__SCREAMING_SNAKE_CASE :Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__SCREAMING_SNAKE_CASE :Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
            ] )
| 191 | 0 |
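# What the `inputs` property above yields for each branch, reproduced
# standalone: only the axis mapping differs between the multiple-choice and
# default tasks.
from collections import OrderedDict

def onnx_inputs(task):
    if task == "multiple-choice":
        axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", axis), ("attention_mask", axis)])

assert onnx_inputs("default")["input_ids"] == {0: "batch", 1: "sequence"}
assert onnx_inputs("multiple-choice")["attention_mask"][1] == "choice"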
'''simple docstring'''
import math
import random
def a_ ( lowerCamelCase : float , lowerCamelCase : bool = False ):
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
__snake_case =0.0_2
def a_ ( lowerCamelCase : int , lowerCamelCase : int ):
lowerCAmelCase = float(2 * (random.randint(1 , 100 )) - 1 )
    for _ in range(number_propagations ):
# Forward propagation
lowerCAmelCase = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
lowerCAmelCase = (expected / 100) - layer_a
# Error delta
        lowerCAmelCase = layer_1_error * sigmoid_function(layer_a , True )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case =int(input("""Expected value: """))
__snake_case =int(input("""Number of propagations: """))
print(forward_propagation(expected, number_propagations))
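# A spot check of the sigmoid and its derivative form used above: at x = 0 the
# sigmoid is exactly 0.5, so sigma'(0) = sigma(0) * (1 - sigma(0)) = 0.25.
import math

def sigmoid(x):
    return 1 / (1 + math.exp(-x))

s = sigmoid(0.0)
assert s == 0.5
assert s * (1 - s) == 0.25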
| 55 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__snake_case =TypeVar("""T""")
class UpperCAmelCase_ ( Generic[T] ):
def __init__( self : int , UpperCAmelCase__ : T ) -> List[str]:
lowerCAmelCase = data
lowerCAmelCase = None
def __str__( self : Optional[int] ) -> str:
return F'''{self.data}'''
class UpperCAmelCase_ ( Generic[T] ):
def __init__( self : Optional[Any] ) -> None:
lowerCAmelCase = None
def __iter__( self : Any ) -> Iterator[T]:
lowerCAmelCase = self.top
while node:
yield node.data
lowerCAmelCase = node.next
def __str__( self : str ) -> str:
return "->".join([str(UpperCAmelCase__ ) for item in self] )
def __len__( self : Optional[int] ) -> int:
return len(tuple(iter(self ) ) )
def __UpperCAmelCase ( self : Optional[Any] ) -> bool:
return self.top is None
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : T ) -> None:
lowerCAmelCase = Node(UpperCAmelCase__ )
if not self.is_empty():
lowerCAmelCase = self.top
lowerCAmelCase = node
def __UpperCAmelCase ( self : str ) -> T:
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , UpperCAmelCase__ )
lowerCAmelCase = self.top
lowerCAmelCase = self.top.next
return pop_node.data
def __UpperCAmelCase ( self : List[Any] ) -> T:
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def __UpperCAmelCase ( self : str ) -> None:
lowerCAmelCase = None
if __name__ == "__main__":
from doctest import testmod
testmod()
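# A compact usage sketch of the same LIFO behaviour (self-contained, since the
# method names in the class above were mangled to a single placeholder):
class MiniStack:
    def __init__(self):
        self._top = None  # chain of (data, next) tuples

    def push(self, data):
        self._top = (data, self._top)

    def pop(self):
        if self._top is None:
            raise IndexError("pop from empty stack")
        data, self._top = self._top
        return data

s = MiniStack()
s.push(1)
s.push(2)
assert s.pop() == 2 and s.pop() == 1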
| 55 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = params
_lowerCAmelCase : List[str] = np.array(__a)
        _lowerCAmelCase : Optional[Any] = np.array([len(t) for t in data])
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self, __a):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self):
'''simple docstring'''
return len(self.lengths)
def snake_case__ ( self):
'''simple docstring'''
assert len(self.token_ids) == len(self.lengths)
assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = self.params.max_model_input_size
_lowerCAmelCase : List[str] = self.lengths > max_len
logger.info(f"Splitting {sum(__a)} too long sequences.")
def divide_chunks(__a, __a):
            return [l[i : i + n] for i in range(0, len(l), n)]
_lowerCAmelCase : str = []
_lowerCAmelCase : Optional[int] = []
if self.params.mlm:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
else:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
for seq_, len_ in zip(self.token_ids, self.lengths):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_)
new_lengths.append(len_)
else:
_lowerCAmelCase : int = []
for sub_s in divide_chunks(seq_, max_len - 2):
if sub_s[0] != cls_id:
_lowerCAmelCase : Union[str, Any] = np.insert(__a, 0, __a)
if sub_s[-1] != sep_id:
_lowerCAmelCase : Any = np.insert(__a, len(__a), __a)
assert len(__a) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__a)
new_tok_ids.extend(__a)
new_lengths.extend([len(__a) for l in sub_seqs])
_lowerCAmelCase : Any = np.array(__a)
_lowerCAmelCase : Optional[Any] = np.array(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = len(self)
_lowerCAmelCase : str = self.lengths > 11
_lowerCAmelCase : Tuple = self.token_ids[indices]
_lowerCAmelCase : Optional[Any] = self.lengths[indices]
_lowerCAmelCase : List[Any] = len(self)
logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")
def snake_case__ ( self):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
_lowerCAmelCase : Any = self.params.special_tok_ids["unk_token"]
_lowerCAmelCase : int = len(self)
_lowerCAmelCase : Any = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
_lowerCAmelCase : int = (unk_occs / self.lengths) < 0.5
_lowerCAmelCase : List[str] = self.token_ids[indices]
_lowerCAmelCase : List[Any] = self.lengths[indices]
_lowerCAmelCase : Optional[int] = len(self)
logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
def snake_case__ ( self):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(f"{len(self)} sequences")
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = [t[0] for t in batch]
_lowerCAmelCase : Any = [t[1] for t in batch]
assert len(__a) == len(__a)
# Max for paddings
_lowerCAmelCase : List[Any] = max(__a)
# Pad token ids
if self.params.mlm:
_lowerCAmelCase : Any = self.params.special_tok_ids["pad_token"]
else:
_lowerCAmelCase : int = self.params.special_tok_ids["unk_token"]
_lowerCAmelCase : int = [list(t.astype(__a)) + [pad_idx] * (max_seq_len_ - len(__a)) for t in token_ids]
assert len(tk_) == len(__a)
assert all(len(__a) == max_seq_len_ for t in tk_)
_lowerCAmelCase : Union[str, Any] = torch.tensor(tk_) # (bs, max_seq_len_)
_lowerCAmelCase : Optional[Any] = torch.tensor(__a) # (bs)
return tk_t, lg_t
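# A minimal usage sketch (assumption: `params` is a namespace providing
# `max_model_input_size`, `mlm`, `is_master` and the `special_tok_ids` dict,
# and `data` is a list of 1-D numpy arrays of token ids; neither is defined
# in this snippet). `batch_sequences` doubles as the DataLoader collate_fn:
#
#   from torch.utils.data import DataLoader
#
#   dataset = LmSeqsDataset(params, data)
#   loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)
#   token_ids, lengths = next(iter(loader))  # (bs, max_seq_len_), (bs,)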
| 36 |
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for `bit_count` bits as integers."""
    if bit_count < 0:
        raise ValueError("The given input must be non-negative")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Recursively build the Gray code sequence for `bit_count` bits as binary strings."""
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence: 1 << n is 2^n

    # the recursive call generates the sequence for n - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # prepend 0 to the first half of the smaller sequence
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # prepend 1 to the second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence
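# Worked example: gray_code_sequence_string(2) returns ["00", "01", "11", "10"],
# so gray_code(2) returns [0, 1, 3, 2]; adjacent values differ in exactly one bit.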
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30_000,
        embedding_size=128,
        hidden_size=4_096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16_384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        ) | 210 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError() | 210 | 1 |
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
lowerCamelCase_ : List[str] = logging.get_logger(__name__)
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , **snake_case_ ):
"""simple docstring"""
requires_backends(self , ['bs4'] )
super().__init__(**snake_case_ )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Tuple = []
A_ : List[str] = []
A_ : int = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
A_ : int = parent.find_all(child.name , recursive=snake_case_ )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(snake_case_ ) else next(i for i, s in enumerate(snake_case_ , 1 ) if s is child ) )
A_ : Union[str, Any] = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Optional[Any] = BeautifulSoup(snake_case_ , 'html.parser' )
A_ : Dict = []
A_ : Dict = []
A_ : Tuple = []
for element in html_code.descendants:
if type(snake_case_ ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
A_ : Union[str, Any] = html.unescape(snake_case_ ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(snake_case_ )
A_ , A_ : Optional[Any] = self.xpath_soup(snake_case_ )
stringaxtag_seq.append(snake_case_ )
stringaxsubs_seq.append(snake_case_ )
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError('Number of doc strings and xtags does not correspond' )
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError('Number of doc strings and xsubs does not correspond' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Union[str, Any] = ''
for tagname, subs in zip(snake_case_ , snake_case_ ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
def __call__( self , snake_case_ ):
"""simple docstring"""
A_ : Optional[int] = False
# Check that strings has a valid type
if isinstance(snake_case_ , snake_case_ ):
A_ : List[str] = True
elif isinstance(snake_case_ , (list, tuple) ):
if len(snake_case_ ) == 0 or isinstance(html_strings[0] , snake_case_ ):
A_ : str = True
if not valid_strings:
raise ValueError(
'HTML strings must of type `str`, `List[str]` (batch of examples), '
F"""but is of type {type(snake_case_ )}.""" )
A_ : str = bool(isinstance(snake_case_ , (list, tuple) ) and (isinstance(html_strings[0] , snake_case_ )) )
if not is_batched:
A_ : Optional[Any] = [html_strings]
# Get nodes + xpaths
A_ : str = []
A_ : Tuple = []
for html_string in html_strings:
A_ , A_ , A_ : Optional[Any] = self.get_three_from_single(snake_case_ )
nodes.append(snake_case_ )
A_ : List[Any] = []
for node, tag_list, sub_list in zip(snake_case_ , snake_case_ , snake_case_ ):
A_ : int = self.construct_xpath(snake_case_ , snake_case_ )
xpath_strings.append(snake_case_ )
xpaths.append(snake_case_ )
# return as Dict
A_ : Tuple = {'nodes': nodes, 'xpaths': xpaths}
A_ : List[Any] = BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
return encoded_inputs | 286 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
lowerCamelCase_ : Dict = get_logger(__name__)
lowerCamelCase_ : List[str] = r'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class _UpperCAmelCase :
'''simple docstring'''
@add_start_docstrings(snake_case_ )
def __call__( self , snake_case_ , snake_case_ ):
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class _UpperCAmelCase :
'''simple docstring'''
@add_start_docstrings(snake_case_ )
def __call__( self , snake_case_ , snake_case_ ):
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@add_start_docstrings(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ):
"""simple docstring"""
for processor in self:
A_ : Tuple = inspect.signature(processor.__call__ ).parameters
if len(snake_case_ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F"""Make sure that all the required parameters: {list(function_args.keys() )} for """
F"""{processor.__class__} are passed to the logits processor.""" )
A_ : Tuple = processor(snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
else:
A_ : Optional[Any] = processor(snake_case_ , snake_case_ , snake_case_ )
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ ):
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not (temperature > 0):
raise ValueError(F"""`temperature` has to be a strictly positive float, but is {temperature}""" )
A_ : Optional[int] = temperature
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : int = scores / self.temperature
return scores
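# A minimal composition sketch using the classes above (the shapes are
# illustrative; in `transformers` these correspond to the Flax logits
# processing utilities):
#
#   processors = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7)])
#   input_ids = jnp.zeros((2, 5), dtype=jnp.int32)   # (batch, cur_len)
#   scores = jnp.zeros((2, 50257))                   # (batch, vocab_size)
#   warped = processors(input_ids, scores, cur_len=5)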
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """Logits warper that keeps the smallest set of tokens whose cumulative probability reaches `top_p`."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """Logits warper that restricts sampling to the `top_k` highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that forces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that forces the specified token as the last token when `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor enforcing a minimum length by setting the EOS probability to 0."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")

        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor suppressing a list of tokens as soon as generation starts."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor suppressing a list of tokens at every decoding step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that forces specific tokens at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """Whisper-specific processor that constrains where timestamp tokens may appear."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores | 286 | 1 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """Dataset reader backed by a `pyspark.sql.DataFrame`."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
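# Minimal usage sketch (assumes an active SparkSession; the dataframe
# contents here are illustrative, not part of this module):
#
#   from pyspark.sql import SparkSession
#
#   spark = SparkSession.builder.getOrCreate()
#   df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#   ds = SparkDatasetReader(df, streaming=False).read()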
| 356 |
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number whose base**exponent pair has the greatest value."""
    largest: float = 0
    result: int = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
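# Why the logarithm trick works: log10 is strictly increasing, so comparing
# x * log10(a) is equivalent to comparing a**x without ever materializing the
# huge powers. E.g. 7 * log10(3) ≈ 3.34 > 11 * log10(2) ≈ 3.31, matching
# 3**7 = 2187 > 2**11 = 2048.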
if __name__ == "__main__":
print(solution())
| 29 | 0 |
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler

from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()

    observed_qs = []
    examples = 0
    test_perps = []
    contexts = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)

    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context[0, :] = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1

                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)

                    print("Test perplexity, step", global_step, ":", real_perp)

            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name"
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument(
        "--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name"
    )
    parser.add_argument(
        "--recopy_model",
        default=False,
        type=bool,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main()
| 57 |
"""simple docstring"""
def lowercase ( A_ , A_ )-> float:
'''simple docstring'''
if mass < 0:
raise ValueError("The mass of a body cannot be negative" )
return 0.5 * mass * abs(A_ ) * abs(A_ )
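# Example: a 10 kg body moving at 5 m/s carries 0.5 * 10 * 5 * 5 = 250 J;
# the velocity enters squared (via abs() twice), so its sign is irrelevant.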
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 40 | 0 |
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """A logger adapter that, by default, logs only on the main process."""

    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
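# Minimal usage sketch (requires an initialized accelerate state, e.g. by
# constructing `Accelerator()` first, as enforced by the guard in `log`):
#
#   from accelerate import Accelerator
#
#   accelerator = Accelerator()
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once, on the main process")
#   logger.info("printed on every rank", main_process_only=False)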
| 264 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 264 | 1 |
"""Collection of number-theory helpers: primes, factorization, gcd/lcm, fractions, Fibonacci."""
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True if `number` is prime, else False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status


def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: return all primes from 2 up to `n`."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def get_prime_numbers(n: int) -> list:
    """Return all primes from 2 up to `n`, by primality-testing each candidate."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def prime_factorization(number: int) -> list:
    """Return the prime factorization of `number` as a list of prime factors."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def greatest_prime_factor(number: int) -> int:
    """Return the greatest prime factor of `number`."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of `number`."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def is_even(number: int) -> bool:
    """Return True if `number` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Return True if `number` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"

    return number % 2 != 0


def goldbach(number: int) -> list:
    """Goldbach's assumption: return two primes whose sum equals the even `number`."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans


def gcd(number1: int, number2: int) -> int:
    """Greatest common divisor of two non-negative integers (Euclidean algorithm)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1


def kg_v(number1: int, number2: int) -> int:
    """Least common multiple of two positive integers, via prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans


def get_prime(n: int) -> int:
    """Return the n-th prime number (0-based: get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return all primes strictly between the primes `p_number_1` and `p_number_2`."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    """Return all divisors of `n` (inclusive 1 and `n`)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    """Return True if `number` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduce a fraction to lowest terms; returns (numerator, denominator)."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Return n! for a non-negative integer n."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Return the n-th Fibonacci number, computed iteratively."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
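# Quick sanity examples for this module:
#
#   get_prime_numbers(20)      # [2, 3, 5, 7, 11, 13, 17, 19]
#   prime_factorization(60)    # [2, 2, 3, 5]
#   goldbach(28)               # [5, 23]
#   kg_v(24, 36)               # 72  (least common multiple)
#   simplify_fraction(10, 20)  # (1, 2)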
| 4 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that `n` can be placed at (row, column) without clashing."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 0 | 0 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__: str = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
): str = config_and_inputs
lowercase__: Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class NezhaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fp16 = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> List[Any]:
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict

    def setUp( self ) -> List[Any]:
        '''simple docstring'''
        self.model_tester = NezhaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NezhaConfig , hidden_size=37 )
    def test_config( self ) -> Dict:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model( self ) -> Dict:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_as_decoder( self ) -> Tuple:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )

    def test_model_as_decoder_with_default_input_mask( self ) -> int:
        '''simple docstring'''
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )

    def test_for_masked_lm( self ) -> Optional[Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ) -> int:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_next_sequence_prediction( self ) -> Optional[int]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs )

    def test_for_pretraining( self ) -> Tuple:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )

    def test_for_question_answering( self ) -> Optional[int]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ) -> Optional[int]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ) -> Union[str, Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ) -> int:
        '''simple docstring'''
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @slow
    @require_torch_gpu
    def test_torchscript_device_change( self ) -> List[Any]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config )

            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , 'bert.pt' ) )
                loaded = torch.jit.load(os.path.join(tmp , 'bert.pt' ) , map_location=torch_device )
                loaded(inputs_dict['input_ids'].to(torch_device ) , inputs_dict['attention_mask'].to(torch_device ) )
@require_torch
class NezhaModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_nezha_model( self ) -> List[Any]:
        '''simple docstring'''
        model = NezhaModel.from_pretrained('sijunhe/nezha-cn-base' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_nezha_masked_lm( self ) -> List[str]:
        '''simple docstring'''
        model = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 21128) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
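
# Illustrative usage sketch, not part of the test suite above: it mirrors what
# create_and_check_for_masked_lm verifies, using a deliberately tiny, assumed
# NezhaConfig so the forward pass runs quickly on CPU.
def _example_nezha_masked_lm_forward():
    from transformers import NezhaConfig, NezhaForMaskedLM

    config = NezhaConfig(
        vocab_size=100, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64
    )
    model = NezhaForMaskedLM(config).eval()
    input_ids = torch.randint(0, config.vocab_size, (1, 8))
    with torch.no_grad():
        logits = model(input_ids).logits
    # One score per vocabulary entry at every position.
    assert logits.shape == (1, 8, config.vocab_size)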
| 288 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = '''base_with_context'''
def load_notes_encoder( weights , model ) -> int:
    lowercase__: Tuple = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
    lowercase__: Optional[int] = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
    for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f'layers_{lyr_num}']
        lowercase__: List[Any] = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )

        attention_weights = ly_weight['attention']
lowercase__: int = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowercase__: int = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowercase__: List[str] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowercase__: Dict = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowercase__: List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowercase__: Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowercase__: Dict = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowercase__: List[str] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowercase__: Any = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder( weights , model ) -> List[str]:
    lowercase__: str = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
    lowercase__: Dict = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
    for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f'layers_{lyr_num}']
        attention_weights = ly_weight['attention']
lowercase__: List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowercase__: Dict = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowercase__: int = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowercase__: Dict = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowercase__: Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
lowercase__: List[str] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowercase__: Dict = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowercase__: Tuple = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowercase__: Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowercase__: List[str] = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder( weights , model ) -> Any:
    lowercase__: int = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
    lowercase__: Any = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
    lowercase__: int = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
    lowercase__: Dict = nn.Parameter(
        torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
    for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f'layers_{lyr_num}']
        lowercase__: Any = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
        lowercase__: int = nn.Parameter(
            torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )

        attention_weights = ly_weight['self_attention']
        lowercase__: Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
        lowercase__: Any = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
        lowercase__: Tuple = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
        lowercase__: Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )

        attention_weights = ly_weight['MultiHeadDotProductAttention_0']
lowercase__: List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowercase__: Dict = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowercase__: str = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowercase__: Any = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowercase__: int = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
lowercase__: List[str] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowercase__: Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
lowercase__: List[str] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowercase__: int = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowercase__: str = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowercase__: Optional[Any] = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
lowercase__: Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main( args ) -> Any:
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    t5_checkpoint = jnp.tree_util.tree_map(onp.array , t5_checkpoint )
    gin_overrides = [
        'from __gin__ import dynamic_registration',
        'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
        'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
        'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
    ]

    gin_file = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )

    scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )

    notes_encoder = load_notes_encoder(t5_checkpoint['target']['token_encoder'] , notes_encoder )
    continuous_encoder = load_continuous_encoder(t5_checkpoint['target']['continuous_encoder'] , continuous_encoder )
    decoder = load_decoder(t5_checkpoint['target']['decoder'] , decoder )

    melgan = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
    args = parser.parse_args()
    main(args)
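
# A minimal sketch (not part of the conversion script itself) of how the converted
# pipeline could be loaded back afterwards; the directory below is the assumed
# value passed to --output_path above.
def _example_load_converted_pipeline(output_path="./converted_pipeline"):
    from diffusers import SpectrogramDiffusionPipeline

    pipeline = SpectrogramDiffusionPipeline.from_pretrained(output_path)
    return pipeline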
| 288 | 1 |
'''simple docstring'''
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.05585,
    "footpound": 1.355_818,
}
def energy_conversion( from_type , to_type , value ) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION )}"
        )
        raise ValueError(msg )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
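
# Worked example with assumed inputs, consistent with the table above:
# 1 kilowatthour = 3_600_000 J, so 2 kWh converts by multiplying by the
# "kilowatthour" factor and dividing by the "joule" factor of 1.0.
def _example_energy_conversion() -> None:
    assert energy_conversion("kilowatthour", "joule", 2) == 7_200_000.0
    assert energy_conversion("joule", "kilojoule", 500) == 0.5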
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__a = logging.get_logger(__name__)
class BeitFeatureExtractor( BeitImageProcessor ):
    """simple docstring"""

    def __init__( self : List[str] , *args : str , **kwargs : List[str] ):
        warnings.warn(
            """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use BeitImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
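
# Migration sketch: since BeitFeatureExtractor only forwards to BeitImageProcessor
# with a deprecation warning, new code can construct the processor directly. The
# kwargs here are arbitrary illustrative values, not required arguments.
def _example_migration():
    return BeitImageProcessor(do_resize=True)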
| 35 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
UpperCamelCase__ = logging.get_logger(__name__)
class MobileViTImageProcessor( BaseImageProcessor ):
    model_input_names = ['pixel_values']

    def __init__(self : List[str] , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_flip_channel_order : bool = True , **kwargs : Optional[int] , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(self : str , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PIL.Image.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : List[Any] , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop(self : Tuple , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : List[str] , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale(self : List[str] , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Tuple , ) -> Any:
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def flip_channel_order(self : Tuple , image : np.ndarray , data_format : Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray:
        """simple docstring"""
        return flip_channel_order(image , data_format=data_format )
    def preprocess(self : Tuple , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_rescale : bool = None , rescale_factor : float = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_flip_channel_order : bool = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs : List[Any] , ) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )

        images = make_list_of_images(images )
        if not valid_images(images ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]

        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]

        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image ) for image in images]

        images = [to_channel_dimension_format(image , data_format ) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation(self : Optional[int] , outputs : int , target_sizes : List[Tuple] = None ) -> List[Any]:
        """simple docstring"""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )

            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]

        return semantic_segmentation
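
# Illustrative usage sketch (not part of the processor implementation): running
# preprocess on a random uint8 image. The 480x640 input shape is an arbitrary
# assumption; the output is padded/cropped to the default 256x256 crop_size.
def _example_preprocess():
    image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    image_processor = MobileViTImageProcessor()
    batch = image_processor.preprocess(image, return_tensors="np")
    # Channels-first, batched output.
    assert batch["pixel_values"].shape == (1, 3, 256, 256)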
| 143 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self : Dict ) -> List[str]:
        """simple docstring"""
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )

    def test_convert_token_and_id(self : int ) -> Union[str, Any]:
        """simple docstring"""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab(self : Any ) -> Union[str, Any]:
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "[MASK]" )
        self.assertEqual(len(vocab_keys ) , 1004 )
    def test_vocab_size(self : Optional[Any] ) -> Optional[Any]:
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )

    def test_rust_and_python_full_tokenizers(self : Union[str, Any] ) -> Any:
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_full_tokenizer(self : str ) -> Tuple:
        """simple docstring"""
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB , keep_accents=True )

        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )

        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self : Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )

    @slow
    def test_tokenization_base_easy_symbols(self : str ) -> Optional[int]:
        """simple docstring"""
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenization_base_hard_symbols(self : List[Any] ) -> Union[str, Any]:
        """simple docstring"""
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self : List[str] ) -> int:
        """simple docstring"""
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = " ".join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="pt" , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=False )

        config = BigBirdConfig(attention_type="original_full" )
        model = BigBirdModel(config )

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
    @slow
    def test_special_tokens(self : str ) -> Optional[int]:
        """simple docstring"""
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )

        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
    @slow
    def test_tokenizer_integration(self : Optional[Any] ) -> Optional[int]:
        """simple docstring"""
        # fmt: off
UpperCAmelCase__ = {"input_ids": [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__ , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
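
# Standalone usage sketch (outside the test class): the checkpoint name matches
# the one exercised by the slow tests above; running this requires network access.
def _example_bigbird_tokenizer_roundtrip():
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    ids = tokenizer("Paris is the [MASK].").input_ids
    # decode() inverts encoding up to special-token placement.
    return tokenizer.decode(ids)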
| 143 | 1 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
__lowerCAmelCase : Optional[Any] =getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    '''simple docstring'''
    model_name = str(model_name )
    assert local_rank is not None
    torch.distributed.init_process_group(backend="""nccl""" , rank=local_rank )

    save_dir = Path(save_dir )
    save_path = save_dir.joinpath(f'rank_{local_rank}_output.json' )
    torch.cuda.set_device(local_rank )
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name ).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model , task )  # update config with task specific params
    num_beams = generate_kwargs.pop("""num_beams""" , model.config.num_beams )  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(f'Inferred tokenizer type: {tokenizer.__class__}' )  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config , """prefix""" , """""" ) or """"""
    ds = SeqaSeqDataset(
        tokenizer , data_dir , max_source_length , max_target_length=1024 , type_path=type_path , n_obs=n_obs , prefix=prefix , **dataset_kwargs , )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs , distributed=True , add_extra_examples=False , shuffle=True )
    data_loader = DataLoader(ds , sampler=sampler , batch_size=bs , collate_fn=ds.collate_fn )
    results = []
    for batch in tqdm(data_loader ):
        summaries = model.generate(
            input_ids=batch["""input_ids"""].to(model.device ) , attention_mask=batch["""attention_mask"""].to(model.device ) , num_return_sequences=num_return_sequences , num_beams=num_beams , **generate_kwargs , )
        preds = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        ids = batch["""ids"""]
        if num_return_sequences > 1:
            preds = chunks(preds , num_return_sequences )  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds ):
            results.append({"""pred""": pred, """id""": ids[i].item()} )
    save_json(results , save_path )
    return results, sampler.num_replicas
def run_generate() -> List[Any]:
    '''simple docstring'''
    parser = argparse.ArgumentParser(
        epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" )
    parser.add_argument("""--data_dir""" , type=str , help="""like cnn_dm/test.source""" )
    parser.add_argument(
        """--model_name""" , type=str , help="""like facebook/bart-large-cnn,t5-base, etc.""" , default="""sshleifer/distilbart-xsum-12-3""" , )
    parser.add_argument("""--save_dir""" , type=str , help="""where to save""" , default="""tmp_gen""" )
    parser.add_argument("""--max_source_length""" , type=int , default=None )
    parser.add_argument(
        """--type_path""" , type=str , default="""test""" , help="""which subset to evaluate typically train/val/test""" )
    parser.add_argument("""--task""" , type=str , default="""summarization""" , help="""used for task_specific_params + metrics""" )
    parser.add_argument("""--bs""" , type=int , default=8 , required=False , help="""batch size""" )
    parser.add_argument(
        """--local_rank""" , type=int , default=-1 , required=False , help="""should be passed by distributed.launch""" )
    parser.add_argument(
        """--n_obs""" , type=int , default=None , required=False , help="""How many observations. Defaults to all.""" )
    parser.add_argument(
        """--num_return_sequences""" , type=int , default=1 , required=False , help="""How many sequences to return""" )
    parser.add_argument(
        """--sync_timeout""" , type=int , default=600 , required=False , help="""How long should master process wait for other processes to finish.""" , )
    parser.add_argument("""--src_lang""" , type=str , default=None , required=False )
    parser.add_argument("""--tgt_lang""" , type=str , default=None , required=False )
    parser.add_argument(
        """--prefix""" , type=str , required=False , default=None , help="""will be added to the begininng of src examples""" )
    parser.add_argument("""--fp16""" , action="""store_true""" )
    parser.add_argument("""--debug""" , action="""store_true""" )
    start_time = time.time()
    args , rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest )
    if generate_kwargs and args.local_rank <= 0:
        print(f'parsed the following generate kwargs: {generate_kwargs}' )
    json_save_dir = Path(args.save_dir + """_tmp""" )
    Path(json_save_dir ).mkdir(exist_ok=True )  # this handles locking.
    intermediate_files = list(json_save_dir.glob("""rank_*.json""" ) )
    if intermediate_files:
        raise ValueError(f'Found files at {json_save_dir} please move or remove them.' )
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["""src_lang"""] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["""tgt_lang"""] = args.tgt_lang

    Path(args.save_dir ).mkdir(exist_ok=True )
    results , num_replicas = eval_data_dir(
        args.data_dir , json_save_dir , args.model_name , type_path=args.type_path , bs=args.bs , fp16=args.fp16 , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=dataset_kwargs , **generate_kwargs , )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir )
        save_dir.mkdir(exist_ok=True )
        partial_results = gather_results_from_each_node(num_replicas , json_save_dir , args.sync_timeout )
        preds = combine_partial_results(partial_results )
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("""pseudolabel_results.json""" )
            print(f'Saving aggregated results at {save_path}, intermediate in {json_save_dir}/' )
            save_json(preds , save_path )
            return
        tgt_file = Path(args.data_dir ).joinpath(args.type_path + """.target""" )
        with open(tgt_file ) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds )]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = """translation""" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = """bleu""" if calc_bleu else """rouge"""
        metrics = score_fn(preds , labels )
        metrics["""n_obs"""] = len(preds )
        runtime = time.time() - start_time
        metrics["""seconds_per_sample"""] = round(runtime / metrics["""n_obs"""] , 4 )
        metrics["""n_gpus"""] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f'{args.type_path}_{metric_name}.json' )
        save_json(metrics , metrics_save_path , indent=None )
        print(metrics )
        write_txt_file(preds , save_dir.joinpath(f'{args.type_path}_generations.txt' ) )
        if args.debug:
            write_txt_file(labels , save_dir.joinpath(f'{args.type_path}.target' ) )
        else:
            shutil.rmtree(json_save_dir )
def combine_partial_results( partial_results ) -> List:
    '''simple docstring'''
    records = []
    for partial_result in partial_results:
        records.extend(partial_result )
    records = sorted(records , key=lambda x : x["id"] )
    preds = [x["""pred"""] for x in records]
    return preds
def gather_results_from_each_node( num_replicas , save_dir , timeout ) -> List[Dict[str, List]]:
    '''simple docstring'''
    start_wait = time.time()
    logger.info("""waiting for all nodes to finish""" )
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("""rank_*.json""" ) )
        if len(json_files ) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json , json_files )
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("""Rank 0 gave up on waiting for other processes""" )
    # Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
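
# A hypothetical multi-GPU launch of this script, shown as a comment because it
# is a shell command rather than Python; the script filename, dataset path, and
# GPU count below are assumptions for illustration:
#
#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 \
#       --data_dir xsum --save_dir tmp_gen --bs 8 --fp16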
| 197 | """simple docstring"""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler( metaclass=DummyObject ):
    _backends = ["""torch""", """torchsde"""]

    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ["""torch""", """torchsde"""] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["""torch""", """torchsde"""] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["""torch""", """torchsde"""] )
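
# What the dummy object achieves, in a minimal sketch: importing the name above
# always succeeds, but any instantiation without the listed backends raises an
# informative ImportError from requires_backends.
def _example_dummy_guard():
    try:
        DPMSolverSDEScheduler()
    except ImportError as err:
        # The error message names the missing pip installs.
        return str(err)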
| 197 | 1 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__( self , parent , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ) -> Dict:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder( self ) -> str:
        '''simple docstring'''
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
        '''simple docstring'''
        model = TFEsmModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> Optional[Any]:
        '''simple docstring'''
        config.add_cross_attention = True
        model = TFEsmModel(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''encoder_hidden_states''': encoder_hidden_states,
            '''encoder_attention_mask''': encoder_attention_mask,
        }
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs , encoder_hidden_states=encoder_hidden_states )
        # Also check the case where encoder outputs are not passed
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Any:
        '''simple docstring'''
        model = TFEsmForMaskedLM(config=config )
        result = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Any:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ) -> int:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class TFEsmModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp( self ) -> List[str]:
        '''simple docstring'''
        self.model_tester = TFEsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )

    def test_config( self ) -> List[str]:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model( self ) -> List[Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_as_decoder( self ) -> Any:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )

    def test_for_masked_lm( self ) -> Union[str, Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_token_classification( self ) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ) -> int:
        '''simple docstring'''
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def test_resize_token_embeddings( self ) -> int:
        '''simple docstring'''
        pass

    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def test_save_load_after_resize_token_embeddings( self ) -> Tuple:
        '''simple docstring'''
        pass

    def test_model_common_attributes( self ) -> Optional[int]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name , dict )
                for k, v in name.items():
                    assert isinstance(v , tf.Variable )
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_masked_lm( self ) -> List[str]:
        '''simple docstring'''
        model = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
    @slow
    def test_inference_no_head( self ) -> Tuple:
        '''simple docstring'''
        model = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 354 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_time_series_transformer'] = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33 | 0 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class _snake_case ( unittest.TestCase ):
    def get_tokenizer( self , mname ) -> Dict:
        '''simple docstring'''
        return FSMTTokenizer.from_pretrained(mname )

    def get_model( self , mname ) -> Tuple:
        '''simple docstring'''
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["en-ru", 2_6.0],
["ru-en", 2_2.0],
["en-de", 2_2.0],
["de-en", 2_9.0],
] )
@slow
    def test_bleu_scores( self , pair , min_bleu_score ) -> None:
        '''simple docstring'''
        mname = F'facebook/wmt19-{pair}'
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences , return_tensors="pt" , truncation=True , padding="longest" ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores["bleu"] , min_bleu_score )
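    # Hedged sketch of the helper imported from ``utils`` above (an assumption, not
    # verified against that module): ``calculate_bleu`` is expected to wrap sacrebleu's
    # corpus BLEU and return a dict with a rounded ``"bleu"`` score:
    #
    #   from sacrebleu import corpus_bleu
    #   def calculate_bleu(output_lns, refs_lns):
    #       return {"bleu": round(corpus_bleu(output_lns, [refs_lns]).score, 4)}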
| 85 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile( script )-> dict:
    """simple docstring"""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"' ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__( self , username: str ) -> None:
        '''simple docstring'''
        self.url = F'https://www.instagram.com/{username}/'
        self.user_data = self.get_json()
    def get_json( self ) -> dict:
        '''simple docstring'''
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , 'html.parser' ).find_all('script' )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
    def __repr__( self ) -> str:
        '''simple docstring'''
        return F'{self.__class__.__name__}(\'{self.username}\')'
    def __str__( self ) -> str:
        '''simple docstring'''
        return F'{self.fullname} ({self.username}) is {self.biography}'
    @property
    def username( self ) -> str:
        '''simple docstring'''
        return self.user_data["username"]
    @property
    def fullname( self ) -> str:
        '''simple docstring'''
        return self.user_data["full_name"]
    @property
    def biography( self ) -> str:
        '''simple docstring'''
        return self.user_data["biography"]
    @property
    def email( self ) -> str:
        '''simple docstring'''
        return self.user_data["business_email"]
    @property
    def website( self ) -> str:
        '''simple docstring'''
        return self.user_data["external_url"]
    @property
    def number_of_followers( self ) -> int:
        '''simple docstring'''
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings( self ) -> int:
        '''simple docstring'''
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts( self ) -> int:
        '''simple docstring'''
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url( self ) -> str:
        '''simple docstring'''
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified( self ) -> bool:
        '''simple docstring'''
        return self.user_data["is_verified"]
    @property
    def is_private( self ) -> bool:
        '''simple docstring'''
        return self.user_data["is_private"]
def test_instagram_user( username: str = "github" )-> None:
    """simple docstring"""
    import os
    if os.environ.get('CI' ):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , dict )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
| 161 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests( unittest.TestCase ):
    @property
    def dummy_uncond_unet( self ) -> UNet2DModel:
        """simple docstring"""
        torch.manual_seed(0 )
        model = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model
    @property
    def dummy_vq_model( self ) -> VQModel:
        """simple docstring"""
        torch.manual_seed(0 )
        model = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
        return model
    @property
    def dummy_text_encoder( self ) -> CLIPTextModel:
        """simple docstring"""
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config )
    def test_inference_uncond( self ) -> None:
        """simple docstring"""
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet , vqvae=vae , scheduler=scheduler )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator , num_inference_steps=2 , output_type='numpy' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = ldm(generator=generator , num_inference_steps=2 , output_type='numpy' , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests( unittest.TestCase ):
    def test_inference_uncond( self ) -> None:
        """simple docstring"""
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator , num_inference_steps=5 , output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 185 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ['''BeitFeatureExtractor''']
    _import_structure["image_processing_beit"] = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 185 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch( openai_checkpoint_folder_path : str , openai_config_file : str , pytorch_dump_folder_path : str ) -> None:
    """simple docstring"""
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file )
    model = OpenAIGPTModel(config )
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model , config , openai_checkpoint_folder_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--openai_checkpoint_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the TensorFlow checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--openai_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
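    # Example invocation (illustrative; the script name and both paths are hypothetical):
    #   python convert_openai_original_tf_checkpoint_to_pytorch.py \
    #       --openai_checkpoint_folder_path ./openai-gpt-checkpoint \
    #       --pytorch_dump_folder_path ./openai-gpt-pytorch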
| 102 |
"""simple docstring"""
from __future__ import annotations
def is_palindrome( n : int | str ) ->bool:
    """simple docstring"""
    n = str(n )
    return n == n[::-1]
def solution( limit : int = 1_000_000 ) ->int:
    """simple docstring"""
    total = 0
    for i in range(1 , limit ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split('''b''' )[1] ):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
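    # Worked example (illustrative): 585 is a palindrome in base 10 and in base 2
    # (585 == 0b1001001001), so solution() counts it; a quick manual check is
    #   assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])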
| 102 | 1 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime( number : int ) ->bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator( ) ->Iterator[int]:
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution( n : int = 2000000 ) ->int:
    '''simple docstring'''
    return sum(takewhile(lambda x : x < n , prime_generator() ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
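    # Quick sanity check (added for illustration): the primes below 10 sum to 17.
    assert solution(10) == 2 + 3 + 5 + 7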
| 291 |
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration
REQUIRE_FAIRSEQ = {"""comet"""}
_has_fairseq = importlib.util.find_spec("""fairseq""") is not None
UNSUPPORTED_ON_WINDOWS = {"""code_eval"""}
_on_windows = os.name == """nt"""
REQUIRE_TRANSFORMERS = {"""bertscore""", """frugalscore""", """perplexity"""}
_has_transformers = importlib.util.find_spec("""transformers""") is not None
def skip_if_metric_requires_fairseq( test_case ) ->Any:
    '''simple docstring'''
    @wraps(test_case )
    def wrapper(self ,metric_name ):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('''"test requires Fairseq"''' )
        else:
            test_case(self ,metric_name )
    return wrapper
def skip_if_metric_requires_transformers( test_case ) ->Any:
    '''simple docstring'''
    @wraps(test_case )
    def wrapper(self ,metric_name ):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('''"test requires transformers"''' )
        else:
            test_case(self ,metric_name )
    return wrapper
def skip_on_windows( test_case ) ->Any:
    '''simple docstring'''
    @wraps(test_case )
    def wrapper(self ,metric_name ):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('''"test not supported on Windows"''' )
        else:
            test_case(self ,metric_name )
    return wrapper
def get_local_metric_names( ) ->list:
    '''simple docstring'''
    metrics = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
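# Illustrative sketch (an assumption, not the actual ``.utils`` implementation): the
# ``for_all_test_methods`` helper imported above can be written as a class decorator
# that wraps every ``test_*`` method with each of the given decorators:
#
#   def for_all_test_methods(*decorators):
#       def decorate(cls):
#           for name, fn in list(vars(cls).items()):
#               if callable(fn) and name.startswith("test"):
#                   for decorator in reversed(decorators):
#                       fn = decorator(fn)
#                   setattr(cls, name, fn)
#           return cls
#       return decorate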
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    skip_if_metric_requires_fairseq , skip_if_metric_requires_transformers , skip_on_windows )
@local
class LocalMetricTest( parameterized.TestCase ):
    """simple docstring"""
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
    @pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' )
    def test_load_metric( self , metric_name ):
        '''simple docstring'''
        _ = '''[...]'''  # unused placeholder
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('''metrics''' , metric_name ) ).module_path )
        metric = datasets.load.import_main_class(metric_module.__name__ , dataset=False )
        # check parameters
        parameters = inspect.signature(metric._compute ).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) )  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name , metric_module.__name__ ):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module , verbose=True , raise_on_error=True )
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
                self.assertEqual(results.failed , 0 )
                self.assertGreater(results.attempted , 1 )
    @slow
    def test_load_real_metric( self , metric_name ):
        '''simple docstring'''
        _ = '''[...]'''  # unused placeholder
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('''metrics''' , metric_name ) ).module_path )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module , verbose=True , raise_on_error=True )
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
    @contextmanager
    def patch_intensive_calls( self , metric_name , module_name ):
        '''simple docstring'''
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name ):
                yield
        else:
            yield
    @contextmanager
    def use_local_metrics( self ):
        '''simple docstring'''
        def load_local_metric(metric_name , *args , **kwargs ):
            return load_metric(os.path.join('''metrics''' , metric_name ) , *args , **kwargs )
        with patch('''datasets.load_metric''' ) as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield
    @classmethod
    def register_intensive_calls_patcher( cls , metric_name ):
        '''simple docstring'''
        def wrapper(patcher ):
            patcher = contextmanager(patcher )
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher
        return wrapper
@LocalMetricTest.register_intensive_calls_patcher('''bleurt''' )
def patch_bleurt( module_name ) ->int:
    '''simple docstring'''
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor
    tf.flags.DEFINE_string('''sv''' ,'''''' ,'''''' )  # handle pytest cli flags
    class MockedPredictor( Predictor ):
        """simple docstring"""
        def predict( self , input_dict ):
            '''simple docstring'''
            assert len(input_dict['''input_ids'''] ) == 2
            return np.array([1.0_3, 1.0_4] )
    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher('''bertscore''' )
def patch_bertscore( module_name ) ->Dict:
    '''simple docstring'''
    import torch
    def bert_cos_score_idf(model , refs , *args , **kwargs ):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs ) )
    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch('''bert_score.scorer.get_model''' ), patch(
        '''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher('''comet''' )
def patch_comet( module_name ) ->List[Any]:
    '''simple docstring'''
    def load_from_checkpoint(model_path ):
        class Model:
            """simple docstring"""
            def predict( self , data , *args , **kwargs ):
                '''simple docstring'''
                assert len(data ) == 2
                scores = [0.1_9, 0.9_2]
                return scores, sum(scores ) / len(scores )
        return Model()
    # mock download_model which is supposed to download a comet model
    # mock load_from_checkpoint which is supposed to load the model weights
    with patch('''comet.download_model''' ) as mock_download_model:
        mock_download_model.return_value = None
        with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme( ) ->None:
    '''simple docstring'''
    metric = load_metric(os.path.join('''metrics''' ,'''seqeval''' ) )
    wrong_scheme = '''ERROR'''
    error_message = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
    with pytest.raises(ValueError ,match=re.escape(error_message ) ):
        metric.compute(predictions=[] ,references=[] ,scheme=wrong_scheme )
| 291 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ['''LayoutLMv3TokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ['''LayoutLMv3FeatureExtractor''']
    _import_structure["image_processing_layoutlmv3"] = ['''LayoutLMv3ImageProcessor''']
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 230 |
import math
import sys
def read_file_binary( file_path: str ) -> str:
    """simple docstring"""
    result = ''''''
    try:
        with open(file_path , '''rb''' ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"""{dat:08b}"""
            result += curr_byte
        return result
    except OSError:
        print('''File not accessible''' )
        sys.exit()
def decompress_data( data_bits: str ) -> str:
    """simple docstring"""
    lexicon = {'''0''': '''0''', '''1''': '''1'''}
    result , curr_string = '''''', ''''''
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '''0'''
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex['''0''' + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + '''1'''
        index += 1
        curr_string = ''''''
    return result
def write_file_binary( file_path: str , to_write: str ) -> None:
    """simple docstring"""
    byte_length = 8
    try:
        with open(file_path , '''wb''' ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('''10000000''' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder='''big''' ) )
    except OSError:
        print('''File not accessible''' )
        sys.exit()
def remove_prefix( data_bits: str ) -> str:
    """simple docstring"""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress( source_path: str , destination_path: str ) -> None:
    """simple docstring"""
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
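    # Illustrative sanity check (added; can be enabled manually): with the starter
    # lexicon {"0": "0", "1": "1"}, a stream consisting of a single set bit decodes
    # to itself:
    #   assert decompress_data("1") == "1"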
| 230 | 1 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    '''simple docstring'''
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores" , type=int , default=1 , help="Number of TPU cores to use (1 or 8)." )
    # positional
    parser.add_argument(
        "training_script" , type=str , help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ) , )
    # rest from the training program
    parser.add_argument("training_script_args" , nargs=REMAINDER )
    return parser.parse_args()
def main():
    '''simple docstring'''
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main() | 64 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class DetaConfig( PretrainedConfig ):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__( self , backbone_config=None , num_queries=900 , max_position_embeddings=2048 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.0_2 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , num_feature_levels=5 , encoder_n_points=4 , decoder_n_points=4 , two_stage=True , two_stage_num_proposals=300 , with_box_refine=True , assign_first_stage=True , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.2_5 , **kwargs , ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
        else:
            if isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.pop("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ):
        return self.encoder_attention_heads
    @property
    def hidden_size( self ):
        return self.d_model
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output | 64 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest( unittest.TestCase):
    def setUp( self):
        '''simple docstring'''
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_feature_extractor_from_model_shortcut( self):
        '''simple docstring'''
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_directory_from_key( self):
        '''simple docstring'''
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_directory_from_config( self):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            config = AutoFeatureExtractor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)
            self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_file( self):
        '''simple docstring'''
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_repo_not_found( self):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")
    def test_revision_not_found( self):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_feature_extractor_not_found( self):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.", ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_feature_extractor( self):
        '''simple docstring'''
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False)
        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration( self):
        '''simple docstring'''
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict( self):
        '''simple docstring'''
        class NewFeatureExtractor( Wav2Vec2FeatureExtractor):
            is_local = True
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor")
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False)
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True)
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 36 |
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
    hyperparameters = {
        '''task_name''': '''mnli''',
        '''per_device_train_batch_size''': 16,
        '''per_device_eval_batch_size''': 16,
        '''do_train''': True,
        '''do_eval''': True,
        '''do_predict''': True,
        '''output_dir''': '''/opt/ml/model''',
        '''overwrite_output_dir''': True,
        '''max_steps''': 500,
        '''save_steps''': 5500,
    }
    distributed_hyperparameters = {**hyperparameters, '''max_steps''': 1000}
    @property
    def metric_definitions( self ) -> list:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]
    @property
    def base_job_name( self ) -> str:
        return F"{self.framework}-transfromers-test"
    @property
    def test_path( self ) -> str:
        return F"./tests/sagemaker/scripts/{self.framework}"
    @property
    def image_uri( self ) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def sm_env( request ):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
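# Usage note (illustrative; the test class below is hypothetical): a test class sets a
# ``framework`` attribute and opts in to the fixture so the environment is attached:
#
#   @pytest.mark.usefixtures("sm_env")
#   class TestSingleNodeTraining:
#       framework = "pytorch"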
| 236 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ['''BeitFeatureExtractor''']
    _import_structure["image_processing_beit"] = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 211 |
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__( self , value: int | None = None )-> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None
    def __repr__( self )-> str:
        from pprint import pformat
        if self.left is None and self.right is None:
            return str(self.value )
        return pformat({F'{self.value}': (self.left, self.right)} , indent=1 )
class BinarySearchTree:
    def __init__( self , root: Node | None = None )-> None:
        self.root = root
    def __str__( self )-> str:
        return str(self.root )
    def __reassign_nodes( self , node: Node , new_children: Node | None )-> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node ):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children
    def is_right( self , node: Node )-> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def empty( self )-> bool:
        return self.root is None
    def __insert( self , value )-> None:
        new_node = Node(value )  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node
    def insert( self , *values )-> None:
        for value in values:
            self.__insert(value )
    def search( self , value )-> Node | None:
        if self.empty():
            raise IndexError('''Warning: Tree is empty! please use another.''' )
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node
    def get_max( self , node: Node | None = None )-> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node
    def get_min( self , node: Node | None = None )-> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove( self , value: int )-> None:
        node = self.search(value )  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node , None )
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node , node.right )
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node , node.left )
            else:
                tmp_node = self.get_max(
                    node.left )  # Gets the max value of the left branch
                self.remove(tmp_node.value )  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse( self , node: Node | None )-> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )
    def traversal_tree( self , traversal_function=None )-> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )
    def inorder( self , arr: list , node: Node | None )-> None:
        if node:
            self.inorder(arr , node.left )
            arr.append(node.value )
            self.inorder(arr , node.right )
    def find_kth_smallest( self , k: int , node: Node )-> int:
        arr: list[int] = []
        self.inorder(arr , node )  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder( curr_node ):
    """simple docstring"""
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list
def main( ):
    """simple docstring"""
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i )
    # Prints all the elements of the list in order traversal
    print(t )
    if t.search(6 ) is not None:
        print('''The value 6 exists''' )
    else:
        print('''The value 6 doesn\'t exist''' )
    if t.search(-1 ) is not None:
        print('''The value -1 exists''' )
    else:
        print('''The value -1 doesn\'t exist''' )
    if not t.empty():
        print('''Max Value: ''' , t.get_max().value )  # type: ignore
        print('''Min Value: ''' , t.get_min().value )  # type: ignore
    for i in testlist:
        t.remove(i )
    print(t )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
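    # Illustrative usage of find_kth_smallest (a quick check, not from the original file):
    #   t = BinarySearchTree()
    #   t.insert(8, 3, 6, 1, 10)
    #   assert t.find_kth_smallest(2, t.root) == 3  # inorder is [1, 3, 6, 8, 10]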
| 211 | 1 |
def naive_cut_rod_recursive( n: int , prices: list ):
    '''simple docstring'''
    _enforce_args(n , prices )
    if n == 0:
        return 0
    max_revue = float('''-inf''' )
    for i in range(1 , n + 1 ):
        max_revue = max(
            max_revue , prices[i - 1] + naive_cut_rod_recursive(n - i , prices ) )
    return max_revue
def top_down_cut_rod( n: int , prices: list ):
    '''simple docstring'''
    _enforce_args(n , prices )
    max_rev = [float('''-inf''' ) for _ in range(n + 1 )]
    return _top_down_cut_rod_recursive(n , prices , max_rev )
def _top_down_cut_rod_recursive( n: int , prices: list , max_rev: list ):
    '''simple docstring'''
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float('''-inf''' )
        for i in range(1 , n + 1 ):
            max_revenue = max(
                max_revenue , prices[i - 1] + _top_down_cut_rod_recursive(n - i , prices , max_rev ) , )
        max_rev[n] = max_revenue
        return max_rev[n]
def bottom_up_cut_rod( n: int , prices: list ):
    '''simple docstring'''
    _enforce_args(n , prices )
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float('''-inf''' ) for _ in range(n + 1 )]
    max_rev[0] = 0
    for i in range(1 , n + 1 ):
        max_revenue_i = max_rev[i]
        for j in range(1 , i + 1 ):
            max_revenue_i = max(max_revenue_i , prices[j - 1] + max_rev[i - j] )
        max_rev[i] = max_revenue_i
    return max_rev[n]
def _enforce_args( n: int , prices: list ):
    '''simple docstring'''
    if n < 0:
        msg = F'''n must be greater than or equal to 0. Got n = {n}'''
        raise ValueError(msg )
    if n > len(prices ):
        msg = (
            '''Each integral piece of rod must have a corresponding price. '''
            F'''Got n = {n} but length of prices = {len(prices )}'''
        )
        raise ValueError(msg )
def main( ):
    '''simple docstring'''
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices )
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n , prices )
    max_rev_bottom_up = bottom_up_cut_rod(n , prices )
    max_rev_naive = naive_cut_rod_recursive(n , prices )
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
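    # Worked check (added for illustration): with the classic CLRS price table for
    # lengths 1..4, the best cut of a length-4 rod is two length-2 pieces (5 + 5).
    assert bottom_up_cut_rod(4 , [1, 5, 8, 9] ) == 10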
| 30 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
def get_deta_config( model_name: str ):
    backbone_config = SwinConfig(
        embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["""stage2""", """stage3""", """stage4"""] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = """huggingface/label-files"""
    if "o365" in model_name:
        num_labels = 366
        filename = """object365-id2label.json"""
    else:
        num_labels = 91
        filename = """coco-detection-id2label.json"""
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="""dataset""" ) ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys( config ):
    rename_keys = []
    # stem
    # fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") )
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") )
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") )
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") )
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") )
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def lowerCamelCase__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] ):
__UpperCAmelCase : Union[str, Any] = dct.pop(__lowerCamelCase )
__UpperCAmelCase : List[Any] = val
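# Toy illustration (the keys are made up, not real DETA parameters) of the
# pop/re-insert pattern the helper above applies for every (src, dest) pair
# produced by create_rename_keys.
def _demo_rename_key():
    toy_state = {"backbone.old_name": 1.0}
    val = toy_state.pop("backbone.old_name")
    toy_state["model.new_name"] = val
    assert toy_state == {"model.new_name": 1.0}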
def lowerCamelCase__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] ):
__UpperCAmelCase : List[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__UpperCAmelCase : Any = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__UpperCAmelCase : int = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" )
__UpperCAmelCase : str = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__UpperCAmelCase : str = in_proj_weight[:dim, :]
            __UpperCAmelCase : Union[str, Any] = in_proj_bias[:dim]
            __UpperCAmelCase : List[str] = in_proj_weight[dim : dim * 2, :]
            __UpperCAmelCase : Dict = in_proj_bias[dim : dim * 2]
            __UpperCAmelCase : List[str] = in_proj_weight[-dim:, :]
            __UpperCAmelCase : Optional[Any] = in_proj_bias[-dim:]
# fmt: on
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : Any ):
# transformer decoder self-attention layers
__UpperCAmelCase : Union[str, Any] = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
__UpperCAmelCase : List[Any] = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
__UpperCAmelCase : Union[str, Any] = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__UpperCAmelCase : int = in_proj_weight[:hidden_size, :]
__UpperCAmelCase : Any = in_proj_bias[:hidden_size]
        __UpperCAmelCase : Dict = in_proj_weight[hidden_size : hidden_size * 2, :]
__UpperCAmelCase : Union[str, Any] = in_proj_bias[hidden_size : hidden_size * 2]
__UpperCAmelCase : List[Any] = in_proj_weight[-hidden_size:, :]
__UpperCAmelCase : int = in_proj_bias[-hidden_size:]
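# A self-contained sketch (toy dim, not the real checkpoint) of the slicing both
# read_in_* helpers perform: a fused (3*dim, dim) in-projection splits row-wise
# into equal query / key / value blocks, in that order.
def _demo_split_fused_qkv():
    import torch

    dim = 4
    fused_weight = torch.randn(3 * dim, dim)  # rows stacked as [q | k | v]
    fused_bias = torch.randn(3 * dim)
    q_w, k_w, v_w = fused_weight[:dim, :], fused_weight[dim : dim * 2, :], fused_weight[-dim:, :]
    q_b, k_b, v_b = fused_bias[:dim], fused_bias[dim : dim * 2], fused_bias[-dim:]
    assert q_w.shape == k_w.shape == v_w.shape == (dim, dim)
    assert q_b.shape == k_b.shape == v_b.shape == (dim,)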
def lowerCamelCase__ ( ):
__UpperCAmelCase : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__UpperCAmelCase : Tuple = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] ):
__UpperCAmelCase : Any = get_deta_config(__lowerCamelCase )
# load original state dict
if model_name == "deta-swin-large":
__UpperCAmelCase : Optional[Any] = hf_hub_download(repo_id="""nielsr/deta-checkpoints""" , filename="""adet_swin_ft.pth""" )
elif model_name == "deta-swin-large-o365":
__UpperCAmelCase : Any = hf_hub_download(repo_id="""jozhang97/deta-swin-l-o365""" , filename="""deta_swin_pt_o365.pth""" )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
__UpperCAmelCase : int = torch.load(__lowerCamelCase , map_location="""cpu""" )["""model"""]
# original state dict
for name, param in state_dict.items():
print(__lowerCamelCase , param.shape )
# rename keys
__UpperCAmelCase : Dict = create_rename_keys(__lowerCamelCase )
for src, dest in rename_keys:
rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
read_in_swin_q_k_v(__lowerCamelCase , config.backbone_config )
read_in_decoder_q_k_v(__lowerCamelCase , __lowerCamelCase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
__UpperCAmelCase : str = state_dict.pop(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = val
if "input_proj" in key:
__UpperCAmelCase : Any = state_dict.pop(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
__UpperCAmelCase : Dict = state_dict.pop(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = val
# finally, create HuggingFace model and load state dict
__UpperCAmelCase : Optional[Any] = DetaForObjectDetection(__lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = """cuda""" if torch.cuda.is_available() else """cpu"""
model.to(__lowerCamelCase )
# load image processor
__UpperCAmelCase : int = DetaImageProcessor(format="""coco_detection""" )
# verify our conversion on image
__UpperCAmelCase : Dict = prepare_img()
__UpperCAmelCase : Tuple = processor(images=__lowerCamelCase , return_tensors="""pt""" )
__UpperCAmelCase : Optional[int] = encoding["""pixel_values"""]
__UpperCAmelCase : List[str] = model(pixel_values.to(__lowerCamelCase ) )
# verify logits
print("""Logits:""" , outputs.logits[0, :3, :3] )
print("""Boxes:""" , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
__UpperCAmelCase : List[Any] = torch.tensor(
[[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
__UpperCAmelCase : int = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
elif model_name == "deta-swin-large-o365":
__UpperCAmelCase : Optional[int] = torch.tensor(
[[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
__UpperCAmelCase : List[Any] = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__lowerCamelCase ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__lowerCamelCase ) , atol=1E-4 )
print("""Everything ok!""" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
# Push to hub
if push_to_hub:
print("""Pushing model and processor to hub...""" )
model.push_to_hub(f"""jozhang97/{model_name}""" )
processor.push_to_hub(f"""jozhang97/{model_name}""" )
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a : List[Any] = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 114 | 0 |
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=24 , UpperCamelCase__=2 , UpperCamelCase__=6 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=3 , UpperCamelCase__=None , UpperCamelCase__=1000 , ) -> int:
'''simple docstring'''
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = type_sequence_label_size
A_ = initializer_range
A_ = num_labels
A_ = scope
A_ = range_bbox
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A_ = bbox[i, j, 3]
A_ = bbox[i, j, 1]
A_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
A_ = bbox[i, j, 2]
A_ = bbox[i, j, 0]
A_ = t
A_ = None
if self.use_input_mask:
A_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> Any:
'''simple docstring'''
A_ = LiltModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
A_ = model(_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
A_ = model(_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> List[str]:
'''simple docstring'''
A_ = self.num_labels
A_ = LiltForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(
_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> List[Any]:
'''simple docstring'''
A_ = LiltForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(
_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = self.prepare_config_and_inputs()
        A_ = config_and_inputs
A_ = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
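# A vectorised sketch of the element-wise swap loop in prepare_config_and_inputs:
# sorting each (x0, x1) and (y0, y1) coordinate pair enforces x0 <= x1 and
# y0 <= y1. The helper name is illustrative and is not used by the tests below.
def _legalize_bbox(bbox):
    bbox[..., 0::2] = bbox[..., 0::2].sort(dim=-1).values  # x pair: indices 0 and 2
    bbox[..., 1::2] = bbox[..., 1::2].sort(dim=-1).values  # y pair: indices 1 and 3
    return bbox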
@require_torch
class A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return True
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = LiltModelTester(self )
A_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case_ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ = type
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
@slow
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = LiltModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_torch
@slow
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(_SCREAMING_SNAKE_CASE )
A_ = torch.tensor([[1, 2]] , device=_SCREAMING_SNAKE_CASE )
A_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
A_ = model(input_ids=_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE )
A_ = torch.Size([1, 2, 768] )
A_ = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=_SCREAMING_SNAKE_CASE , )
        self.assertEqual(outputs.last_hidden_state.shape , _SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
| 364 |
'''simple docstring'''
import sys
__lowerCamelCase = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def UpperCAmelCase__ ( UpperCAmelCase__ = N ) -> int:
A_ = -sys.maxsize - 1
for i in range(len(UpperCAmelCase__ ) - 12 ):
A_ = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
A_ = product
return largest_product
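# An O(n) sliding-window variant (a sketch, independent of the brute-force solver
# above): multiply each incoming digit in, divide the outgoing one out, and count
# zeros so the running product is only read when the window holds none.
def _largest_window_product(digits: str, window: int = 13) -> int:
    best, product, zeros = 0, 1, 0
    for i, ch in enumerate(digits):
        d = int(ch)
        if d == 0:
            zeros += 1
        else:
            product *= d
        if i >= window:  # the digit at i - window just left the window
            old = int(digits[i - window])
            if old == 0:
                zeros -= 1
            else:
                product //= old
        if i >= window - 1 and zeros == 0:
            best = max(best, product)
    return best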
if __name__ == "__main__":
print(f"""{solution() = }""")
| 101 | 0 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def _UpperCamelCase ( UpperCamelCase__ ):
return x + 2
class _snake_case ( unittest.TestCase ):
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = """x = 3"""
UpperCAmelCase__ : Dict = {}
UpperCAmelCase__ : List[str] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
assert result == 3
self.assertDictEqual(_lowerCamelCase , {"""x""": 3})
UpperCAmelCase__ : Optional[int] = """x = y"""
UpperCAmelCase__ : Optional[Any] = {"""y""": 5}
UpperCAmelCase__ : Dict = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_lowerCamelCase , {"""x""": 5, """y""": 5})
def snake_case__ ( self):
UpperCAmelCase__ : Any = """y = add_two(x)"""
UpperCAmelCase__ : Optional[Any] = {"""x""": 3}
UpperCAmelCase__ : Tuple = evaluate(_lowerCamelCase , {"""add_two""": add_two} , state=_lowerCamelCase)
assert result == 5
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """y""": 5})
# Won't work without the tool
with CaptureStdout() as out:
UpperCAmelCase__ : List[str] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
assert result is None
assert "tried to execute add_two" in out.out
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = """x = 3"""
UpperCAmelCase__ : Dict = {}
UpperCAmelCase__ : Optional[int] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
assert result == 3
self.assertDictEqual(_lowerCamelCase , {"""x""": 3})
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = """test_dict = {'x': x, 'y': add_two(x)}"""
UpperCAmelCase__ : Any = {"""x""": 3}
UpperCAmelCase__ : List[str] = evaluate(_lowerCamelCase , {"""add_two""": add_two} , state=_lowerCamelCase)
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """y""": 5})
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}})
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = """x = 3\ny = 5"""
UpperCAmelCase__ : Union[str, Any] = {}
UpperCAmelCase__ : List[Any] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """y""": 5})
def snake_case__ ( self):
UpperCAmelCase__ : Dict = """text = f'This is x: {x}.'"""
UpperCAmelCase__ : str = {"""x""": 3}
UpperCAmelCase__ : Optional[Any] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """text""": """This is x: 3."""})
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = """if x <= 3:\n y = 2\nelse:\n y = 5"""
UpperCAmelCase__ : Optional[Any] = {"""x""": 3}
UpperCAmelCase__ : Optional[int] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """y""": 2})
UpperCAmelCase__ : Optional[int] = {"""x""": 8}
UpperCAmelCase__ : int = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_lowerCamelCase , {"""x""": 8, """y""": 5})
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = """test_list = [x, add_two(x)]"""
UpperCAmelCase__ : int = {"""x""": 3}
UpperCAmelCase__ : Tuple = evaluate(_lowerCamelCase , {"""add_two""": add_two} , state=_lowerCamelCase)
self.assertListEqual(_lowerCamelCase , [3, 5])
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """test_list""": [3, 5]})
def snake_case__ ( self):
UpperCAmelCase__ : Tuple = """y = x"""
UpperCAmelCase__ : Optional[Any] = {"""x""": 3}
UpperCAmelCase__ : Optional[int] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
assert result == 3
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """y""": 3})
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = """test_list = [x, add_two(x)]\ntest_list[1]"""
UpperCAmelCase__ : Union[str, Any] = {"""x""": 3}
UpperCAmelCase__ : int = evaluate(_lowerCamelCase , {"""add_two""": add_two} , state=_lowerCamelCase)
assert result == 5
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """test_list""": [3, 5]})
UpperCAmelCase__ : List[str] = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
UpperCAmelCase__ : Any = {"""x""": 3}
UpperCAmelCase__ : Dict = evaluate(_lowerCamelCase , {"""add_two""": add_two} , state=_lowerCamelCase)
assert result == 5
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}})
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = """x = 0\nfor i in range(3):\n x = i"""
UpperCAmelCase__ : str = {}
UpperCAmelCase__ : Tuple = evaluate(_lowerCamelCase , {"""range""": range} , state=_lowerCamelCase)
assert result == 2
self.assertDictEqual(_lowerCamelCase , {"""x""": 2, """i""": 2}) | 163 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _snake_case ( a__ ):
lowerCAmelCase :Dict = ['''image_processor''', '''tokenizer''']
lowerCAmelCase :Union[str, Any] = '''BlipImageProcessor'''
lowerCAmelCase :Any = '''AutoTokenizer'''
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
super().__init__(_lowerCamelCase , _lowerCamelCase)
# add QFormer tokenizer
UpperCAmelCase__ : List[str] = qformer_tokenizer
def __call__( self , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = 0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = True , _lowerCamelCase = None , **_lowerCamelCase , ):
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""")
UpperCAmelCase__ : List[str] = BatchFeature()
if text is not None:
UpperCAmelCase__ : Any = self.tokenizer(
text=_lowerCamelCase , add_special_tokens=_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , stride=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_overflowing_tokens=_lowerCamelCase , return_special_tokens_mask=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_length=_lowerCamelCase , verbose=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase , )
encoding.update(_lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = self.qformer_tokenizer(
text=_lowerCamelCase , add_special_tokens=_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , stride=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_overflowing_tokens=_lowerCamelCase , return_special_tokens_mask=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_length=_lowerCamelCase , verbose=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase , )
UpperCAmelCase__ : Dict = qformer_text_encoding.pop("""input_ids""")
UpperCAmelCase__ : Tuple = qformer_text_encoding.pop("""attention_mask""")
if images is not None:
UpperCAmelCase__ : List[str] = self.image_processor(_lowerCamelCase , return_tensors=_lowerCamelCase)
encoding.update(_lowerCamelCase)
return encoding
def snake_case__ ( self , *_lowerCamelCase , **_lowerCamelCase):
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase)
def snake_case__ ( self , *_lowerCamelCase , **_lowerCamelCase):
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase)
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = self.tokenizer.model_input_names
UpperCAmelCase__ : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
def snake_case__ ( self , _lowerCamelCase , **_lowerCamelCase):
if os.path.isfile(_lowerCamelCase):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''')
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase)
UpperCAmelCase__ : Dict = os.path.join(_lowerCamelCase , """qformer_tokenizer""")
self.qformer_tokenizer.save_pretrained(_lowerCamelCase)
return super().save_pretrained(_lowerCamelCase , **_lowerCamelCase)
@classmethod
def snake_case__ ( cls , _lowerCamelCase , **_lowerCamelCase):
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_lowerCamelCase , subfolder="""qformer_tokenizer""")
UpperCAmelCase__ : List[Any] = cls._get_arguments_from_pretrained(_lowerCamelCase , **_lowerCamelCase)
args.append(_lowerCamelCase)
        return cls(*_lowerCamelCase)
| 163 | 1 |
def UpperCamelCase ( numa : int , numb : int ) -> bool:
    # True exactly when the two integers have opposite signs: XOR propagates the
    # sign bit, so the result is negative iff the sign bits differ.
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
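    # A few sanity checks for the opposite-sign test above (illustrative values).
    assert UpperCamelCase(1, -1) and UpperCamelCase(-1, 1)
    assert not UpperCamelCase(2, 3) and not UpperCamelCase(-2, -3)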
| 10 |
from __future__ import annotations
__lowerCamelCase = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class UpperCAmelCase :
def __init__(self : Tuple , snake_case__ : dict[str, list[str]] , snake_case__ : str ) -> None:
'''simple docstring'''
snake_case : str = graph
# mapping node to its parent in resulting breadth first tree
snake_case : dict[str, str | None] = {}
snake_case : Union[str, Any] = source_vertex
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> None:
'''simple docstring'''
snake_case : Any = {self.source_vertex}
snake_case : str = None
snake_case : List[str] = [self.source_vertex] # first in first out queue
while queue:
snake_case : List[Any] = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(snake_case__ )
snake_case : Any = vertex
queue.append(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : str ) -> str:
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
snake_case : str = self.parent.get(snake_case__ )
if target_vertex_parent is None:
snake_case : Optional[Any] = (
f"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(snake_case__ )
return self.shortest_path(snake_case__ ) + f"""->{target_vertex}"""
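# An iterative alternative (a sketch, not used by the demo below) that walks the
# parent links collected by the search back to the source and reverses them,
# producing the same "A->B->D"-style string as the recursive method above.
def _iterative_shortest_path(tree, target_vertex):
    path = [target_vertex]
    while path[-1] != tree.source_vertex:
        parent = tree.parent.get(path[-1])
        if parent is None:
            raise ValueError(f"No path from vertex: {tree.source_vertex} to vertex: {target_vertex}")
        path.append(parent)
    return "->".join(path[::-1])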
if __name__ == "__main__":
__lowerCamelCase = Graph(graph, """G""")
g.breadth_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 10 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase :Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase :int = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Optional[Any] = """beit"""
def __init__( self : Optional[Any] , _A : Any=8192 , _A : int=768 , _A : Tuple=12 , _A : Tuple=12 , _A : str=3072 , _A : List[Any]="gelu" , _A : int=0.0 , _A : Dict=0.0 , _A : int=0.02 , _A : str=1E-12 , _A : int=224 , _A : Union[str, Any]=16 , _A : Dict=3 , _A : Optional[Any]=False , _A : int=False , _A : int=False , _A : List[Any]=False , _A : int=0.1 , _A : Optional[Any]=0.1 , _A : List[Any]=True , _A : int=[3, 5, 7, 11] , _A : Tuple=[1, 2, 3, 6] , _A : Union[str, Any]=True , _A : Optional[int]=0.4 , _A : Union[str, Any]=256 , _A : Tuple=1 , _A : Dict=False , _A : int=255 , **_A : Any , ) -> Dict:
super().__init__(**_A )
__magic_name__ : Optional[int] = vocab_size
__magic_name__ : int = hidden_size
__magic_name__ : Dict = num_hidden_layers
__magic_name__ : Dict = num_attention_heads
__magic_name__ : Any = intermediate_size
__magic_name__ : Union[str, Any] = hidden_act
__magic_name__ : int = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : Any = initializer_range
__magic_name__ : List[str] = layer_norm_eps
__magic_name__ : Optional[int] = image_size
__magic_name__ : Union[str, Any] = patch_size
__magic_name__ : Dict = num_channels
__magic_name__ : Dict = use_mask_token
__magic_name__ : List[str] = use_absolute_position_embeddings
__magic_name__ : str = use_relative_position_bias
__magic_name__ : Optional[Any] = use_shared_relative_position_bias
__magic_name__ : Dict = layer_scale_init_value
__magic_name__ : Any = drop_path_rate
__magic_name__ : List[str] = use_mean_pooling
# decode head attributes (semantic segmentation)
__magic_name__ : Dict = out_indices
__magic_name__ : Optional[Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
__magic_name__ : Optional[Any] = use_auxiliary_head
__magic_name__ : Optional[Any] = auxiliary_loss_weight
__magic_name__ : Any = auxiliary_channels
__magic_name__ : str = auxiliary_num_convs
__magic_name__ : Optional[Any] = auxiliary_concat_input
__magic_name__ : str = semantic_loss_ignore_index
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : List[str] = version.parse("""1.11""" )
@property
def __lowerCAmelCase ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowerCAmelCase ( self : Tuple ) -> float:
        return 1E-4
| 331 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Optional[Any] = ["""flax""", """transformers"""]
def __init__( self : Union[str, Any] , *_A : Dict , **_A : Any ) -> int:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *_A : List[Any] , **_A : Any ) -> List[str]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *_A : Tuple , **_A : Optional[int] ) -> int:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Union[str, Any] = ["""flax""", """transformers"""]
def __init__( self : Union[str, Any] , *_A : Any , **_A : int ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , *_A : Optional[int] , **_A : Dict ) -> Optional[Any]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *_A : Any , **_A : Union[str, Any] ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Dict = ["""flax""", """transformers"""]
def __init__( self : int , *_A : Optional[int] , **_A : Any ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *_A : int , **_A : str ) -> Any:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *_A : Union[str, Any] , **_A : List[str] ) -> Optional[int]:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Optional[int] = ["""flax""", """transformers"""]
def __init__( self : Tuple , *_A : Dict , **_A : str ) -> Optional[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : str , *_A : Dict , **_A : Optional[Any] ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *_A : List[str] , **_A : str ) -> Optional[int]:
        requires_backends(cls , ['flax', 'transformers'] )
| 331 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _snake_case ( unittest.TestCase):
def A__ ( self : Tuple ):
lowercase__ = tempfile.mkdtemp()
lowercase__ = BlipImageProcessor()
lowercase__ = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
lowercase__ = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
lowercase__ = InstructBlipProcessor(__lowercase, __lowercase, __lowercase )
processor.save_pretrained(self.tmpdirname )
def A__ ( self : List[str], **__lowercase : int ):
return AutoProcessor.from_pretrained(self.tmpdirname, **__lowercase ).tokenizer
def A__ ( self : Any, **__lowercase : Dict ):
return AutoProcessor.from_pretrained(self.tmpdirname, **__lowercase ).image_processor
def A__ ( self : Any, **__lowercase : List[str] ):
return AutoProcessor.from_pretrained(self.tmpdirname, **__lowercase ).qformer_tokenizer
def A__ ( self : str ):
shutil.rmtree(self.tmpdirname )
def A__ ( self : int ):
lowercase__ = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
lowercase__ = [Image.fromarray(np.moveaxis(__lowercase, 0, -1 ) ) for x in image_inputs]
return image_inputs
def A__ ( self : List[Any] ):
lowercase__ = InstructBlipProcessor(
tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer(), )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)" )
lowercase__ = self.get_image_processor(do_normalize=__lowercase, padding_value=1.0 )
lowercase__ = InstructBlipProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=__lowercase, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, __lowercase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, __lowercase )
self.assertIsInstance(processor.qformer_tokenizer, __lowercase )
def A__ ( self : Dict ):
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_qformer_tokenizer()
lowercase__ = InstructBlipProcessor(
tokenizer=__lowercase, image_processor=__lowercase, qformer_tokenizer=__lowercase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(__lowercase, return_tensors="np" )
lowercase__ = processor(images=__lowercase, return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def A__ ( self : Dict ):
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_qformer_tokenizer()
lowercase__ = InstructBlipProcessor(
tokenizer=__lowercase, image_processor=__lowercase, qformer_tokenizer=__lowercase )
lowercase__ = "lower newer"
lowercase__ = processor(text=__lowercase )
lowercase__ = tokenizer(__lowercase, return_token_type_ids=__lowercase )
lowercase__ = qformer_tokenizer(__lowercase, return_token_type_ids=__lowercase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key], encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key] )
def A__ ( self : Tuple ):
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_qformer_tokenizer()
lowercase__ = InstructBlipProcessor(
tokenizer=__lowercase, image_processor=__lowercase, qformer_tokenizer=__lowercase )
lowercase__ = "lower newer"
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=__lowercase, images=__lowercase )
self.assertListEqual(
list(inputs.keys() ), ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"], )
# test if it raises when no input is passed
with pytest.raises(__lowercase ):
processor()
def A__ ( self : List[str] ):
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_qformer_tokenizer()
lowercase__ = InstructBlipProcessor(
tokenizer=__lowercase, image_processor=__lowercase, qformer_tokenizer=__lowercase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.batch_decode(__lowercase )
lowercase__ = tokenizer.batch_decode(__lowercase )
self.assertListEqual(__lowercase, __lowercase )
def A__ ( self : str ):
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_qformer_tokenizer()
lowercase__ = InstructBlipProcessor(
tokenizer=__lowercase, image_processor=__lowercase, qformer_tokenizer=__lowercase )
lowercase__ = "lower newer"
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=__lowercase, images=__lowercase )
self.assertListEqual(
list(inputs.keys() ), ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"], )
| 224 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 224 | 1 |
'''simple docstring'''
from __future__ import annotations
__lowerCAmelCase = 8.988e9  # units = N * m^2 * C^-2
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> dict[str, float]:
    _a : Tuple = abs(chargea * chargeb )
    if (force, chargea, chargeb, distance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if distance < 0:
        raise ValueError('Distance cannot be negative' )
    if force == 0:
        _a : Dict = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif chargea == 0:
        # solve for the first charge, dividing by the *other* charge
        _a : Dict = abs(lowerCAmelCase_ ) * (distance**2) / (COULOMBS_CONSTANT * chargeb)
        return {"charge1": chargea}
    elif chargeb == 0:
        _a : int = abs(lowerCAmelCase_ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
        return {"charge2": chargeb}
elif distance == 0:
_a : List[Any] = (COULOMBS_CONSTANT * charge_product / abs(lowerCAmelCase_ )) ** 0.5
return {"distance": distance}
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
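    # Worked example (illustrative values): two 1 C charges one metre apart feel
    # F = k * |q1 * q2| / r**2, i.e. exactly COULOMBS_CONSTANT newtons here.
    print(COULOMBS_CONSTANT * abs(1 * 1) / (1**2))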
| 89 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__lowerCAmelCase = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : str ,_UpperCAmelCase : Path ,_UpperCAmelCase : Union[str, None] = None ,_UpperCAmelCase : Union[List[str], None] = None ,_UpperCAmelCase : Union[str, List[str], None] = None ,_UpperCAmelCase : bool = True ,):
_a : Dict = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) )]
if identifier is not None:
_a : str = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
for n_ in n_identifier:
_a : int = [file for file in files if n_ not in file]
else:
_a : Optional[Any] = [file for file in files if n_identifier not in file]
_a : Dict = ignore_files or []
ignore_files.append('__init__.py' )
_a : List[str] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' ,_UpperCAmelCase )
if only_modules:
_a : Any = file.split('.' )[0]
try:
_a : Optional[int] = getattr(_UpperCAmelCase ,_UpperCAmelCase )
_a : Dict = doctest.DocTestSuite(_UpperCAmelCase )
_a : Optional[int] = unittest.TextTestRunner().run(_UpperCAmelCase )
self.assertIs(len(result.failures ) ,0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
_a : str = doctest.testfile(str('..' / directory / file ) ,optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed ,0 )
def __lowercase ( self : Union[str, Any] ):
_a : Optional[Any] = Path('src/transformers' )
_a : Optional[Any] = 'modeling'
_a : Union[str, Any] = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase ,ignore_files=_UpperCAmelCase )
def __lowercase ( self : int ):
_a : str = Path('src/transformers' )
_a : List[str] = 'tokenization'
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase )
def __lowercase ( self : int ):
_a : Any = Path('src/transformers' )
_a : str = 'configuration'
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase )
def __lowercase ( self : Dict ):
_a : Tuple = Path('src/transformers' )
_a : Optional[int] = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(_UpperCAmelCase ,n_identifier=_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
_a : Union[str, Any] = Path('docs/source' )
_a : List[str] = ['favicon.ico']
self.analyze_directory(_UpperCAmelCase ,ignore_files=_UpperCAmelCase ,only_modules=_UpperCAmelCase )
| 89 | 1 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def UpperCamelCase ( _a , _a , _a = 1 / sqrt(2 ) ) -> IIRFilter:
'''simple docstring'''
lowercase_ :Dict = tau * frequency / samplerate
lowercase_ :Any = sin(_a )
lowercase_ :Union[str, Any] = cos(_a )
lowercase_ :Dict = _sin / (2 * q_factor)
lowercase_ :int = (1 - _cos) / 2
lowercase_ :Any = 1 - _cos
lowercase_ :Dict = 1 + alpha
lowercase_ :str = -2 * _cos
lowercase_ :List[Any] = 1 - alpha
lowercase_ :str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase ( _a , _a , _a = 1 / sqrt(2 ) ) -> IIRFilter:
'''simple docstring'''
lowercase_ :str = tau * frequency / samplerate
lowercase_ :Optional[Any] = sin(_a )
lowercase_ :Optional[int] = cos(_a )
lowercase_ :Optional[Any] = _sin / (2 * q_factor)
lowercase_ :Union[str, Any] = (1 + _cos) / 2
lowercase_ :Optional[int] = -1 - _cos
lowercase_ :str = 1 + alpha
lowercase_ :Any = -2 * _cos
lowercase_ :Dict = 1 - alpha
lowercase_ :Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase ( _a , _a , _a = 1 / sqrt(2 ) ) -> IIRFilter:
'''simple docstring'''
lowercase_ :Optional[Any] = tau * frequency / samplerate
lowercase_ :Union[str, Any] = sin(_a )
lowercase_ :List[str] = cos(_a )
lowercase_ :Tuple = _sin / (2 * q_factor)
lowercase_ :List[str] = _sin / 2
lowercase_ :Union[str, Any] = 0
lowercase_ :List[Any] = -ba
lowercase_ :Union[str, Any] = 1 + alpha
lowercase_ :Tuple = -2 * _cos
lowercase_ :List[str] = 1 - alpha
lowercase_ :List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase ( _a , _a , _a = 1 / sqrt(2 ) ) -> IIRFilter:
'''simple docstring'''
lowercase_ :Dict = tau * frequency / samplerate
lowercase_ :List[str] = sin(_a )
lowercase_ :Dict = cos(_a )
lowercase_ :List[Any] = _sin / (2 * q_factor)
lowercase_ :Dict = 1 - alpha
lowercase_ :Tuple = -2 * _cos
lowercase_ :List[str] = 1 + alpha
lowercase_ :List[str] = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def UpperCamelCase ( _a , _a , _a , _a = 1 / sqrt(2 ) , ) -> IIRFilter:
'''simple docstring'''
lowercase_ :Optional[int] = tau * frequency / samplerate
lowercase_ :Dict = sin(_a )
lowercase_ :int = cos(_a )
lowercase_ :List[Any] = _sin / (2 * q_factor)
lowercase_ :Optional[int] = 1_0 ** (gain_db / 4_0)
lowercase_ :int = 1 + alpha * big_a
lowercase_ :Union[str, Any] = -2 * _cos
lowercase_ :int = 1 - alpha * big_a
lowercase_ :Optional[int] = 1 + alpha / big_a
lowercase_ :Any = -2 * _cos
lowercase_ :Any = 1 - alpha / big_a
lowercase_ :Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase ( _a , _a , _a , _a = 1 / sqrt(2 ) , ) -> IIRFilter:
'''simple docstring'''
lowercase_ :Union[str, Any] = tau * frequency / samplerate
lowercase_ :List[Any] = sin(_a )
lowercase_ :Any = cos(_a )
lowercase_ :Any = _sin / (2 * q_factor)
lowercase_ :int = 1_0 ** (gain_db / 4_0)
lowercase_ :Tuple = (big_a + 1) - (big_a - 1) * _cos
lowercase_ :Optional[Any] = (big_a + 1) + (big_a - 1) * _cos
lowercase_ :Any = (big_a - 1) - (big_a + 1) * _cos
lowercase_ :Tuple = (big_a - 1) + (big_a + 1) * _cos
lowercase_ :Any = 2 * sqrt(_a ) * alpha
lowercase_ :Tuple = big_a * (pmc + aaa)
lowercase_ :Dict = 2 * big_a * mpc
lowercase_ :List[Any] = big_a * (pmc - aaa)
lowercase_ :Union[str, Any] = ppmc + aaa
lowercase_ :int = -2 * pmpc
lowercase_ :Tuple = ppmc - aaa
lowercase_ :Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase ( _a , _a , _a , _a = 1 / sqrt(2 ) , ) -> IIRFilter:
'''simple docstring'''
lowercase_ :str = tau * frequency / samplerate
lowercase_ :Union[str, Any] = sin(_a )
lowercase_ :Any = cos(_a )
lowercase_ :Optional[int] = _sin / (2 * q_factor)
lowercase_ :Any = 1_0 ** (gain_db / 4_0)
lowercase_ :str = (big_a + 1) - (big_a - 1) * _cos
lowercase_ :Optional[int] = (big_a + 1) + (big_a - 1) * _cos
lowercase_ :List[Any] = (big_a - 1) - (big_a + 1) * _cos
lowercase_ :Optional[int] = (big_a - 1) + (big_a + 1) * _cos
lowercase_ :List[Any] = 2 * sqrt(_a ) * alpha
lowercase_ :List[str] = big_a * (ppmc + aaa)
lowercase_ :str = -2 * big_a * pmpc
lowercase_ :List[Any] = big_a * (ppmc - aaa)
lowercase_ :List[str] = pmc + aaa
lowercase_ :Dict = 2 * mpc
lowercase_ :int = pmc - aaa
lowercase_ :List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
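# A minimal sketch of running samples through one biquad, independent of the
# IIRFilter class imported above; it assumes the convention of the factories
# here, with a_coeffs the denominator (feedback) and b_coeffs the numerator
# (feedforward), normalised by a0 at each step (direct form I).
def _biquad_direct_form_one(samples, a_coeffs, b_coeffs):
    a0, a1, a2 = a_coeffs
    b0, b1, b2 = b_coeffs
    x1 = x2 = y1 = y2 = 0.0
    out = []
    for x0 in samples:
        y0 = (b0 * x0 + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2) / a0
        x2, x1 = x1, float(x0)
        y2, y1 = y1, y0
        out.append(y0)
    return out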
| 252 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class UpperCamelCase ( lowercase__ ):
'''simple docstring'''
lowercase : Optional[int] ="""decision_transformer"""
lowercase : Dict =["""past_key_values"""]
lowercase : Any ={
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , UpperCamelCase_=17 , UpperCamelCase_=4 , UpperCamelCase_=128 , UpperCamelCase_=4096 , UpperCamelCase_=True , UpperCamelCase_=1 , UpperCamelCase_=1024 , UpperCamelCase_=3 , UpperCamelCase_=1 , UpperCamelCase_=None , UpperCamelCase_="relu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=1E-5 , UpperCamelCase_=0.02 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=5_0256 , UpperCamelCase_=5_0256 , UpperCamelCase_=False , UpperCamelCase_=False , **UpperCamelCase_ , ):
lowercase_ :Any = state_dim
lowercase_ :List[str] = act_dim
lowercase_ :List[str] = hidden_size
lowercase_ :int = max_ep_len
lowercase_ :List[str] = action_tanh
lowercase_ :Any = vocab_size
lowercase_ :List[Any] = n_positions
lowercase_ :List[str] = n_layer
lowercase_ :Optional[Any] = n_head
lowercase_ :int = n_inner
lowercase_ :List[str] = activation_function
lowercase_ :List[str] = resid_pdrop
lowercase_ :Dict = embd_pdrop
lowercase_ :List[Any] = attn_pdrop
lowercase_ :Union[str, Any] = layer_norm_epsilon
lowercase_ :List[str] = initializer_range
lowercase_ :Any = scale_attn_weights
lowercase_ :Union[str, Any] = use_cache
lowercase_ :Any = scale_attn_by_inverse_layer_idx
lowercase_ :Tuple = reorder_and_upcast_attn
lowercase_ :int = bos_token_id
lowercase_ :List[str] = eos_token_id
super().__init__(bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
| 252 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}


class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
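# Shape of the outputs above, with illustrative token ids (a sketch, not taken
# from the original file): writing cls_id as C and sep_id as S,
# build_inputs_with_special_tokens([5, 6])          -> [C, 5, 6, S]
# build_inputs_with_special_tokens([5, 6], [7])     -> [C, 5, 6, S, 7, S]
# create_token_type_ids_from_sequences([5, 6], [7]) -> [0, 0, 0, 0, 1, 1]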
| 246 |
"""simple docstring"""
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # To keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    return catalan_number(node_count) * factorial(node_count)
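# Sanity checks for the helpers above (standard combinatorics facts, added here
# as a sketch): C(10, 5) = 252, Catalan(5) = 42, 5! = 120, and the number of
# binary trees on 5 labeled nodes is 42 * 120 = 5040.
assert binomial_coefficient(10, 5) == 252
assert catalan_number(5) == 42
assert factorial(5) == 120
assert binary_tree_count(5) == 5040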
if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
F'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
F'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 246 | 1 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
    def setUp(self):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
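# Why [14, 15, 20] is expected in test_full_tokenizer: in the vocab list built
# in setUp, "low" is entry 14, "er</w>" is entry 15 and "<unk>" is entry 20
# (0-indexed), so convert_tokens_to_ids(["low", "er</w>", "<unk>"]) -> [14, 15, 20].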
| 279 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
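# Usage sketch (the flag name below is illustrative, not part of accelerate):
# exporting MY_FLAG=yes makes parse_flag_from_env("MY_FLAG") return True;
# any strtobool spelling works ("1", "true", "yes", "on", ...).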
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite")(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    # whether to clear the temporary directory between tests
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")
    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
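# Usage sketch (the command below is illustrative, not from this file):
# stdout = run_command(["python", "-c", "print('ok')"], return_stdout=True)
# execute_subprocess_async is the variant used by the launcher tests; it streams
# worker stdout/stderr while the subprocess runs instead of waiting for exit.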
| 279 | 1 |
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 118 | from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"
    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 118 | 1 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2")
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.")
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config)

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug.")
    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)


def get_keys_to_not_convert(model):
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
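# Usage sketch (hedged: this is the public transformers entry point that calls
# into the helpers above; the checkpoint name is illustrative):
# from transformers import AutoModelForCausalLM
# model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_8bit=True, device_map="auto")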
| 361 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
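# Usage sketch (hedged: assumes the PipelineTool calling convention, where
# __call__ runs encode -> forward -> decode; the file name is illustrative):
# from PIL import Image
# tool = ImageSegmentationTool()
# mask = tool(image=Image.open("cat.jpg"), label="cat")  # black/white PIL mask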
| 4 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"
    def __init__(self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=5_0257, num_wordpiece_labels=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attentions=False, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 253 | from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 338 | 0 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_28,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}")
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}.")

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check verifies that we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json")

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 169 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 169 | 1 |
"""simple docstring"""
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
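# Worked example (hand-checked, not from the original file):
# max_sum_in_array([1, 4, 2, 10, 23, 3, 1, 0, 20], 4) == 39, the window 4+2+10+23.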
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1_0_0_0, 1_0_0_0) for i in range(1_0_0)]
    k = randint(0, 1_1_0)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 255 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir)
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.")
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 2_24}
    training_function(config, args)
if __name__ == "__main__":
main()
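# Example invocation (a sketch; the script and data paths are illustrative):
#   accelerate launch cv_example.py --data_dir ./images --with_tracking --checkpointing_steps epoch
# Filenames are expected to look like 'Abyssinian_1.jpg': the label is whatever
# precedes the final '_<number>.jpg', per the regex in extract_label.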
| 255 | 1 |
from math import pow, sqrt
def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
| 328 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
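

# A minimal smoke test for the projection model above. All dimensions are
# illustrative assumptions, not values from any released UnCLIP checkpoint.
if __name__ == "__main__":
    proj = UnCLIPTextProjModel(
        clip_extra_context_tokens=4,
        clip_embeddings_dim=32,
        time_embed_dim=64,
        cross_attention_dim=16,
    )
    out_hidden_states, out_time_embeddings = proj(
        image_embeddings=torch.randn(2, 32),
        prompt_embeds=torch.randn(2, 32),
        text_encoder_hidden_states=torch.randn(2, 7, 32),
        do_classifier_free_guidance=False,
    )
    # 4 extra CLIP tokens are prepended to the 7 text tokens -> (2, 11, 16) and (2, 64)
    print(out_hidden_states.shape, out_time_embeddings.shape)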
| 328 | 1 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        """Check if log should be performed"""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """Delegates the log call to the underlying logger, after checking which processes should log."""
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
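

# Minimal usage sketch (assumes the accelerate state is initialized first,
# e.g. by constructing an `Accelerator`; otherwise `log` raises a RuntimeError).
if __name__ == "__main__":
    from accelerate import Accelerator

    accelerator = Accelerator()
    logger = get_logger(__name__, log_level="INFO")
    logger.info("visible on the main process only")
    logger.info("visible on every process", main_process_only=False)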
| 148 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50_000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 148 | 1 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    """
    Creates the cache directory for modules with an init, and adds it to the Python path.
    """
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    """
    Creates a dynamic module in the cache directory for modules.
    """
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    """
    Get the list of modules that are relatively imported in a module file.
    """
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    """
    Get the list of all files that are needed for a given module, recursing through relative imports.
    """
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    """
    Check if the current Python environment contains all the libraries that are imported in a file.
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """
    Import a module on the cache directory for modules and extract a class from it.
    """
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """
    Retrieve the pipeline class that inherits from `DiffusionPipeline`, but is not `DiffusionPipeline` itself.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """
    Prepares and downloads a module file from a local folder, a model repo, or the community pipelines on GitHub.
    """
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False, )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, f"{module_needed}.py", cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """
    Extracts a class from a module file, present in the local folder or repository of a model.
    """
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
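

# Minimal usage sketch (illustrative; resolves a community pipeline from the
# GitHub community folder, so it needs network access).
if __name__ == "__main__":
    pipeline_class = get_class_from_dynamic_module(
        "clip_guided_stable_diffusion", module_file="clip_guided_stable_diffusion.py"
    )
    print(pipeline_class.__name__)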
| 363 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Function to reshape a row Numpy array into a column Numpy array"""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Function to compute the covariance matrix inside each class"""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)

    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Function to compute the covariance matrix between multiple classes"""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )

    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        eigenvalues, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    # Check if the dimension desired is less than the number of classes
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any:
        eigenvalues, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(
            features, labels, classes, dimensions
        )
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
        assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
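

# Illustrative end-to-end run on a tiny synthetic dataset (features are stored
# column-wise, shape = (n_features, n_samples)); not part of the test suite.
if __name__ == "__main__":
    rng = np.random.default_rng(seed=0)
    demo_features = rng.normal(size=(4, 30))
    reduced = principal_component_analysis(demo_features, dimensions=2)
    print(reduced.shape)  # (2, 30)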
| 59 | 0 |
"""simple docstring"""
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find the solution for the knight tour problem for a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
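

# Illustrative run: a 5x5 board is the smallest square board that admits an
# open knight's tour, so this prints a completed move matrix.
if __name__ == "__main__":
    for row in open_knight_tour(5):
        print(row)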
| 33 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = WavaVecaFeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            config = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            config = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            config = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(WavaVecaConfig, WavaVecaFeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(WavaVecaFeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 343 | 0 |
'''simple docstring'''
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image by the given level."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Fundamental transformation/operation that'll be performed on every pixel value."""
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change contrast to 170
UpperCamelCase_ : Tuple = change_contrast(img, 170)
cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
| 359 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ : int = logging.get_logger(__name__)
UpperCamelCase_ : Tuple = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
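

# Minimal sketch of constructing the configuration; the overrides below are
# illustrative, smaller-than-default values.
if __name__ == "__main__":
    config = ViTMSNConfig(image_size=96, hidden_size=192, num_hidden_layers=4, num_attention_heads=4)
    print(config.model_type, config.image_size, config.hidden_size)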
| 142 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 |
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
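
    # Non-interactive sanity check (illustrative addition, not part of the original script).
    print(sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]))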
| 19 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=25_6008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
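

# Minimal sketch: the `attribute_map` above resolves the generic names to the
# XGLM-specific ones (the override values here are illustrative).
if __name__ == "__main__":
    config = XGLMConfig(num_layers=2, d_model=64, attention_heads=4, ffn_dim=128)
    print(config.num_hidden_layers, config.hidden_size, config.num_attention_heads)  # 2 64 4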
| 369 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 114 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 214 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
snake_case : List[str] = logging.get_logger(__name__)
snake_case : int = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=5_0265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ):
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ):
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 94 | 0 |
from types import MethodType  # used below to bind the patched scheduler step
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor with values in [0, 1]; returns a bit tensor with values in {-1, 1}."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """Expects a bit tensor with values in {-1, 1}; returns an image tensor with values in [0, 1]."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
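

# Added sanity check (not part of the original pipeline): encoding and decoding
# recovers an image quantized to 1/255 steps, e.g.
#   x = torch.rand(1, 3, 16, 16)
#   assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), (x * 255).int() / 255)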


def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """DDIM step that clamps the predicted x_0 to [-bit_scale, bit_scale]."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def A ( self , lowercase , lowercase , lowercase , lowercase="epsilon" , lowercase=None , lowercase = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
'''simple docstring'''
UpperCamelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
UpperCamelCase , UpperCamelCase = torch.split(lowercase , sample.shape[1] , dim=1 )
else:
UpperCamelCase = None
# 1. compute alphas, betas
UpperCamelCase = self.alphas_cumprod[t]
UpperCamelCase = self.alphas_cumprod[t - 1] if t > 0 else self.one
UpperCamelCase = 1 - alpha_prod_t
UpperCamelCase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
UpperCamelCase = model_output
else:
raise ValueError(f'''Unsupported prediction_type {prediction_type}.''' )
# 3. Clip "predicted x_0"
UpperCamelCase = self.bit_scale
if self.config.clip_sample:
UpperCamelCase = torch.clamp(lowercase , -scale , lowercase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
UpperCamelCase = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
UpperCamelCase = 0
if t > 0:
UpperCamelCase = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=lowercase ).to(model_output.device )
UpperCamelCase = (self._get_variance(lowercase , predicted_variance=lowercase ) ** 0.5) * noise
UpperCamelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=lowercase , pred_original_sample=lowercase )
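

# Added note: steps 4-5 of ddpm_bit_scheduler_step assemble the DDPM posterior
# mean of formula (7),
#   mu_t(x_t, x_0) = (sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t)) * x_0
#                    + (sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * x_t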


class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()

        self.bit_scale = bit_scale
        # monkey-patch the scheduler with the bit-aware step defined above;
        # MethodType binds it so that `self` inside the function is the scheduler
        scheduler.step = MethodType(
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step,
            scheduler,
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()


class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" FNet tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
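    # Added note: the method above produces the standard BERT-style layout,
    #   single sequence:   [CLS] A [SEP]
    #   pair of sequences: [CLS] A [SEP] B [SEP]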

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)


def count_inversions_bf(arr):
    """Counts inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)

    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1

    return num_inversions


def count_inversions_recursive(arr):
    """Counts inversions by divide and conquer in O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Counts inversions between two sorted arrays while merging them."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
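            # Worked example (added): with P = [2, 5], Q = [1, 3] and i = j = 0,
            # P[0] = 2 > Q[0] = 1, so both (2, 1) and (5, 1) are inversions and
            # num_inversion increases by len(P) - i = 2 in a single step.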
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]

    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 8

    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)

    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_lowerCAmelCase : Dict = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""

_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
"""

_KWARGS_DESCRIPTION = r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Returns the fraction of predictions that are equivalent to their references."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)

        return {
            "accuracy": accuracy,
        }
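

# Added note: `math_equivalence.is_equiv` canonicalizes both strings before
# comparing them (so "1/2" and "\\frac{1}{2}" count as equal), which makes the
# returned value an exact-match accuracy over canonical forms.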
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}


class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        """Instantiate an OwlViTConfig from an OWL-ViT text and vision model configuration."""
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config

        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
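

# Minimal usage sketch (added; uses the public transformers API):
#
#   config = OwlViTConfig()  # default text and vision sub-configs
#   config = OwlViTConfig.from_text_vision_configs(
#       OwlViTTextConfig().to_dict(), OwlViTVisionConfig().to_dict()
#   )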
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase__ ( __snake_case, __snake_case ) -> float:
"""simple docstring"""
_UpperCamelCase = sorted(numsa + numsa )
_UpperCamelCase , _UpperCamelCase = divmod(len(__snake_case ), 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
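

# Added examples (values verified by hand):
#   median_of_two_arrays([1, 2], [3]) -> 2        (merged: [1, 2, 3])
#   median_of_two_arrays([0, -1.1], [2.5, 1]) -> 0.5  (merged: [-1.1, 0, 1, 2.5])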
if __name__ == "__main__":
import doctest
doctest.testmod()
_a = [float(x) for x in input("""Enter the elements of first array: """).split()]
_a = [float(x) for x in input("""Enter the elements of second array: """).split()]
print(F"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)


class Test(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))
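        # Added check: (14, 32, 50) is A @ x for rows (1,2,3), (4,5,6), (7,8,9)
        # and x = (1, 2, 3); e.g. 1*1 + 2*2 + 3*3 = 14 for the first row.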

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1))

    def test_add_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5))
        )
if __name__ == "__main__":
unittest.main()
import sys


def matrix_chain_order(array):
    """Computes the minimum-cost parenthesization by dynamic programming."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
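

# Added note: the loop above fills the classic matrix-chain recurrence
#   matrix[a][b] = min over a <= c < b of matrix[a][c] + matrix[c + 1][b]
#                  + array[a - 1] * array[c] * array[b]
# For the dimensions [30, 35, 15, 5, 10, 20, 25] used in main() this gives
# 15125 scalar multiplications, the textbook (CLRS 15.2) result.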


def print_optimal_solution(optimal_solution, i, j):
    """Recursively prints the parenthesization encoded in `optimal_solution`."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
import gc
import random
import unittest

import torch

from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin


@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if

        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)

        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory

        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img

        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting

        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1

        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2

        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1

        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        generator = torch.Generator(device="cpu").manual_seed(0)

        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2

        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase ):
# pipeline 1
_start_torch_memory_measurement()
__lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__lowercase )
__lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__lowercase )
__lowerCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowerCAmelCase = pipe_a(
prompt_embeds=__lowercase , negative_prompt_embeds=__lowercase , image=__lowercase , mask_image=__lowercase , num_inference_steps=2 , generator=__lowercase , output_type='''np''' , )
__lowerCAmelCase = output.images[0]
assert image.shape == (64, 64, 3)
__lowerCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
__lowerCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(__lowercase , __lowercase )
# pipeline 2
_start_torch_memory_measurement()
__lowerCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__lowercase )
__lowerCAmelCase = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(__lowercase )
__lowerCAmelCase = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(__lowercase )
__lowerCAmelCase = pipe_a(
prompt_embeds=__lowercase , negative_prompt_embeds=__lowercase , image=__lowercase , mask_image=__lowercase , original_image=__lowercase , generator=__lowercase , num_inference_steps=2 , output_type='''np''' , )
__lowerCAmelCase = output.images[0]
assert image.shape == (2_56, 2_56, 3)
__lowerCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__lowerCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(__lowercase , __lowercase )
def __magic_name__( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
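# [illustrative sketch] A minimal, hedged sketch of the peak-memory pattern the
# helper above enables: reset the CUDA allocator statistics, run a workload,
# then read back torch.cuda.max_memory_allocated(). The matmul workload and the
# 1 GB bound are made-up illustrations, not values from the IF pipeline tests.
import torch

def run_with_peak_memory(fn):
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    result = fn()
    return result, torch.cuda.max_memory_allocated()

if torch.cuda.is_available():
    _out, _peak = run_with_peak_memory(
        lambda: torch.randn(1024, 1024, device="cuda") @ torch.randn(1024, 1024, device="cuda")
    )
    assert _peak < 1 * 10**9  # illustrative bound, mirroring the asserts above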
| 174 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
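# [illustrative sketch] The try/except block above is the optional-dependency
# pattern: probe for the heavy packages and, if any is missing, export dummy
# objects that fail loudly only when actually used. A minimal standalone sketch
# of the same idea (the dummy class here is a hypothetical stand-in, not the
# library's own dummy objects):
import importlib.util

if importlib.util.find_spec("torch") is not None:
    import torch
else:
    class _DummyTorch:
        def __getattr__(self, name):
            raise ImportError("This object requires `pip install torch` to be used.")
    torch = _DummyTorch()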
| 174 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _lowerCAmelCase ( ) -> List[Any]:
"""simple docstring"""
snake_case__ : Any = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch '''
'''helper utility that will spawn up '''
'''multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=__lowerCAmelCase , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=__lowerCAmelCase , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=__lowerCAmelCase )
return parser.parse_args()
def _lowerCAmelCase ( ) -> Dict:
"""simple docstring"""
snake_case__ : Any = parse_args()
# Import training_script as a module.
snake_case__ : Dict = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
snake_case__ : str = script_fpath.stem
snake_case__ : Dict = importlib.import_module(__lowerCAmelCase )
# Patch sys.argv
snake_case__ : Any = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
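# [illustrative sketch] The launcher above imports the training script as a
# module, patches sys.argv, and hands its `_mp_fn` to xmp.spawn. Below is a
# dependency-free sketch of the same idea using only the standard library (the
# `_mp_fn(rank)` entry-point name is the convention assumed by the launcher,
# not something multiprocessing itself requires):
import importlib
import multiprocessing
import sys
from pathlib import Path

def spawn_script(script_path: str, nprocs: int, extra_args: list) -> None:
    script = Path(script_path)
    sys.path.append(str(script.parent.resolve()))
    mod = importlib.import_module(script.stem)
    sys.argv = [script_path] + list(extra_args)
    procs = [multiprocessing.Process(target=mod._mp_fn, args=(rank,)) for rank in range(nprocs)]
    for proc in procs:
        proc.start()
    for proc in procs:
        proc.join()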
| 359 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class a :
def __init__( self :str ,__lowercase :Optional[Any] ,__lowercase :List[Any]=1_3 ,__lowercase :str=7 ,__lowercase :Dict=True ,__lowercase :Any=True ,__lowercase :str=True ,__lowercase :Any=True ,__lowercase :Tuple=9_9 ,__lowercase :List[str]=3_2 ,__lowercase :int=5 ,__lowercase :Union[str, Any]=4 ,__lowercase :List[str]=4 ,__lowercase :Any="gelu" ,__lowercase :Any=0.0 ,__lowercase :Tuple=0.1 ,__lowercase :str=True ,__lowercase :Tuple=5_1_2 ,__lowercase :Dict=1_6 ,__lowercase :Tuple=2 ,__lowercase :List[str]=0.02 ,__lowercase :Dict=3 ,__lowercase :Optional[int]=4 ,__lowercase :Tuple=None ,):
snake_case__ : Optional[int] = parent
snake_case__ : Optional[Any] = batch_size
snake_case__ : Optional[Any] = seq_length
snake_case__ : Tuple = is_training
snake_case__ : Optional[Any] = use_input_mask
snake_case__ : List[Any] = use_token_type_ids
snake_case__ : str = use_labels
snake_case__ : List[Any] = vocab_size
snake_case__ : Optional[int] = hidden_size
snake_case__ : List[Any] = num_hidden_layers
snake_case__ : str = num_attention_heads
snake_case__ : int = intermediate_multiple_size
snake_case__ : Tuple = hidden_act
snake_case__ : Optional[Any] = hidden_dropout
snake_case__ : str = attention_dropout
snake_case__ : List[str] = weight_tying
snake_case__ : Optional[Any] = max_position_embeddings
snake_case__ : Optional[int] = type_vocab_size
snake_case__ : str = type_sequence_label_size
snake_case__ : Dict = initializer_range
snake_case__ : int = num_labels
snake_case__ : int = num_choices
snake_case__ : int = scope
def __lowerCamelCase ( self :List[str] ):
snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case__ : str = None
if self.use_input_mask:
snake_case__ : int = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Union[str, Any] = None
if self.use_labels:
snake_case__ : int = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
snake_case__ : Optional[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def __lowerCamelCase ( self :int ):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_multiple_size=self.intermediate_multiple_size ,hidden_act=self.hidden_act ,hidden_dropout=self.hidden_dropout ,attention_dropout=self.attention_dropout ,weight_tying=self.weight_tying ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__lowercase ,initializer_range=self.initializer_range ,)
def __lowerCamelCase ( self :str ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : str = self.prepare_config_and_inputs()
snake_case__ : Union[str, Any] = True
return config, input_ids, input_mask, token_labels
def __lowerCamelCase ( self :List[Any] ,__lowercase :Any ,__lowercase :Dict ,__lowercase :Optional[Any] ):
snake_case__ : Union[str, Any] = GPTNeoXJapaneseModel(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Union[str, Any] = model(__lowercase ,attention_mask=__lowercase )
snake_case__ : Optional[Any] = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self :Any ,__lowercase :Union[str, Any] ,__lowercase :Tuple ,__lowercase :Union[str, Any] ):
snake_case__ : Any = True
snake_case__ : Tuple = GPTNeoXJapaneseModel(__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : str = model(__lowercase ,attention_mask=__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self :Any ,__lowercase :List[Any] ,__lowercase :List[Any] ,__lowercase :Optional[Any] ,__lowercase :Any ):
snake_case__ : Any = GPTNeoXJapaneseForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Any = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self :Optional[int] ,__lowercase :Any ,__lowercase :int ,__lowercase :List[str] ):
snake_case__ : Optional[int] = True
snake_case__ : Optional[int] = GPTNeoXJapaneseForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
# first forward pass
snake_case__ : List[Any] = model(__lowercase ,attention_mask=__lowercase ,use_cache=__lowercase )
snake_case__ : Dict = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
snake_case__ : Optional[Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
snake_case__ : int = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append to next input_ids and attention mask
snake_case__ : Optional[int] = torch.cat([input_ids, next_tokens] ,dim=-1 )
snake_case__ : Optional[int] = torch.cat([input_mask, next_mask] ,dim=-1 )
snake_case__ : Dict = model(__lowercase ,attention_mask=__lowercase ,output_hidden_states=__lowercase )
snake_case__ : Tuple = output_from_no_past['''hidden_states'''][0]
snake_case__ : List[str] = model(
__lowercase ,attention_mask=__lowercase ,past_key_values=__lowercase ,output_hidden_states=__lowercase ,)['''hidden_states'''][0]
# select random slice
snake_case__ : Any = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
snake_case__ : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowercase ,__lowercase ,atol=1e-3 ) )
def __lowerCamelCase ( self :Dict ):
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = config_and_inputs
snake_case__ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : Union[str, Any] = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
__lowerCAmelCase : List[str] = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
__lowerCAmelCase : int = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
__lowerCAmelCase : List[Any] = False
__lowerCAmelCase : Tuple = False
__lowerCAmelCase : Tuple = False
__lowerCAmelCase : str = False
def __lowerCamelCase ( self :Any ):
snake_case__ : int = GPTNeoXJapaneseModelTester(self )
snake_case__ : Any = ConfigTester(self ,config_class=__lowercase ,hidden_size=3_7 )
def __lowerCamelCase ( self :Any ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self :str ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__lowercase ,__lowercase ,__lowercase )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__lowercase ,__lowercase ,__lowercase )
def __lowerCamelCase ( self :Optional[Any] ):
# This regression test was failing with PyTorch < 1.3
snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_decoder()
snake_case__ : List[str] = None
self.model_tester.create_and_check_model_as_decoder(__lowercase ,__lowercase ,__lowercase )
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__lowercase ,__lowercase ,__lowercase )
def __lowerCamelCase ( self :str ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__lowercase )
@slow
def __lowerCamelCase ( self :Dict ):
snake_case__ : str = '''abeja/gpt-neox-japanese-2.7b'''
snake_case__ : int = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
snake_case__ : Optional[int] = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
snake_case__ : Optional[int] = GPTNeoXJapaneseTokenizer.from_pretrained(__lowercase )
snake_case__ : Union[str, Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(__lowercase )
snake_case__ : Optional[int] = []
for prompt in prompts:
snake_case__ : Dict = tokenizer(__lowercase ,return_tensors='''pt''' ).input_ids
snake_case__ : Union[str, Any] = model.generate(__lowercase ,max_length=5_0 )
snake_case__ : int = tokenizer.batch_decode(__lowercase ,skip_special_tokens=__lowercase )
predicted_outputs += generated_string
self.assertListEqual(__lowercase ,__lowercase )
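# [illustrative sketch] The past-key-values test above boils down to one
# comparison: the hidden states a cached, incremental forward pass produces for
# the new tokens must match the corresponding slice of a full, uncached forward
# pass. A toy version of that comparison with plain tensors (all shapes are
# illustrative):
import torch

hidden_full = torch.randn(2, 8, 16)             # (batch, seq, hidden) from the full pass
hidden_cached = hidden_full[:, -3:, :].clone()  # what a correct cached pass must return
slice_idx = torch.randint(0, 16, (1,)).item()   # random hidden-dim slice, as in the test
assert torch.allclose(hidden_full[:, -3:, slice_idx], hidden_cached[:, :, slice_idx], atol=1e-3)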
| 44 | 0 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def UpperCamelCase ( snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ) -> Optional[Any]:
UpperCamelCase : str = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm1.weight""", F"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm1.bias""", F"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.weight""", F"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.bias""", F"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm2.weight""", F"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm2.bias""", F"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.weight""", F"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.bias""", F"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc2.weight""", F"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.mlp.fc2.bias""", F"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('encoder.deit.cls_token', 'encoder.embeddings.cls_token'),
('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'),
('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'),
('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'),
('encoder.deit.norm.weight', 'encoder.layernorm.weight'),
('encoder.deit.norm.bias', 'encoder.layernorm.bias'),
] )
return rename_keys
def UpperCamelCase ( snake_case__ : Dict , snake_case__ : List[Any] ) -> Any:
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
UpperCamelCase : Any = state_dict.pop(F"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
UpperCamelCase : str = in_proj_weight[
: encoder_config.hidden_size, :
]
UpperCamelCase : Optional[int] = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
UpperCamelCase : Dict = in_proj_weight[
-encoder_config.hidden_size :, :
]
def UpperCamelCase ( snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : List[Any] ) -> int:
UpperCamelCase : List[str] = dct.pop(lowerCAmelCase__ )
UpperCamelCase : List[Any] = val
def UpperCamelCase ( snake_case__ : List[Any] ) -> Union[str, Any]:
if "handwritten" in checkpoint_url:
UpperCamelCase : List[str] = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
UpperCamelCase : str = 'https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'
UpperCamelCase : Any = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert('RGB' )
return im
@torch.no_grad()
def UpperCamelCase ( snake_case__ : int , snake_case__ : Optional[Any] ) -> Tuple:
UpperCamelCase : Any = ViTConfig(image_size=384 , qkv_bias=lowerCAmelCase__ )
UpperCamelCase : List[str] = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
UpperCamelCase : Optional[Any] = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
UpperCamelCase : Optional[int] = 1024
UpperCamelCase : List[Any] = 4096
UpperCamelCase : int = 24
UpperCamelCase : List[Any] = 16
UpperCamelCase : Tuple = 1024
else:
raise ValueError('Should either find \'base\' or \'large\' in checkpoint URL' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
UpperCamelCase : Tuple = False
UpperCamelCase : Dict = 'relu'
UpperCamelCase : Union[str, Any] = 1024
UpperCamelCase : Tuple = True
UpperCamelCase : Optional[int] = False
UpperCamelCase : Any = False
# load HuggingFace model
UpperCamelCase : List[str] = ViTModel(lowerCAmelCase__ , add_pooling_layer=lowerCAmelCase__ )
UpperCamelCase : str = TrOCRForCausalLM(lowerCAmelCase__ )
UpperCamelCase : Any = VisionEncoderDecoderModel(encoder=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
model.eval()
# load state_dict of original model, rename some keys
UpperCamelCase : Union[str, Any] = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location='cpu' , check_hash=lowerCAmelCase__ )['model']
UpperCamelCase : Tuple = create_rename_keys(lowerCAmelCase__ , lowerCAmelCase__ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
read_in_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
UpperCamelCase : List[Any] = state_dict.pop(lowerCAmelCase__ )
if key.startswith('decoder' ) and "output_projection" not in key:
UpperCamelCase : Union[str, Any] = val
else:
UpperCamelCase : Optional[Any] = val
# load state dict
model.load_state_dict(lowerCAmelCase__ )
# Check outputs on an image
UpperCamelCase : Dict = ViTImageProcessor(size=encoder_config.image_size )
UpperCamelCase : Any = RobertaTokenizer.from_pretrained('roberta-large' )
UpperCamelCase : List[str] = TrOCRProcessor(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCamelCase : Tuple = processor(images=prepare_img(lowerCAmelCase__ ) , return_tensors='pt' ).pixel_values
# verify logits
UpperCamelCase : List[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
UpperCamelCase : str = model(pixel_values=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ )
UpperCamelCase : str = outputs.logits
UpperCamelCase : List[Any] = torch.Size([1, 1, 50265] )
if "trocr-base-handwritten" in checkpoint_url:
UpperCamelCase : Union[str, Any] = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
UpperCamelCase : List[Any] = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
UpperCamelCase : List[Any] = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
UpperCamelCase : List[str] = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , lowerCAmelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase__ )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
__UpperCAmelCase = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
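# [illustrative sketch] The conversion above is an instance of a generic
# checkpoint-migration pattern: enumerate (old_key, new_key) pairs, then
# pop/re-insert the state-dict entries. A tiny standalone version (the key pair
# below is copied from the rename list above; real conversions derive the full
# list from both architectures, as create_rename_keys does):
import torch

state_dict = {"encoder.deit.blocks.0.norm1.weight": torch.ones(4)}
rename_pairs = [("encoder.deit.blocks.0.norm1.weight", "encoder.encoder.layer.0.layernorm_before.weight")]
for old_key, new_key in rename_pairs:
    state_dict[new_key] = state_dict.pop(old_key)
assert "encoder.encoder.layer.0.layernorm_before.weight" in state_dict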
| 119 |
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = (KDPMaDiscreteScheduler,)
_lowerCAmelCase = 1_0
def __UpperCAmelCase ( self , **__magic_name__ ) -> int:
_a = {
'num_train_timesteps': 11_00,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
}
config.update(**__magic_name__ )
return config
def __UpperCAmelCase ( self ) -> Union[str, Any]:
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__magic_name__ )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=__magic_name__ , beta_end=__magic_name__ )
def __UpperCAmelCase ( self ) -> str:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__magic_name__ )
def __UpperCAmelCase ( self ) -> List[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__magic_name__ )
def __UpperCAmelCase ( self ) -> int:
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config(prediction_type='v_prediction' )
_a = scheduler_class(**__magic_name__ )
scheduler.set_timesteps(self.num_inference_steps )
_a = self.dummy_model()
_a = self.dummy_sample_deter * scheduler.init_noise_sigma
_a = sample.to(__magic_name__ )
for i, t in enumerate(scheduler.timesteps ):
_a = scheduler.scale_model_input(__magic_name__ , __magic_name__ )
_a = model(__magic_name__ , __magic_name__ )
_a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ )
_a = output.prev_sample
_a = torch.sum(torch.abs(__magic_name__ ) )
_a = torch.mean(torch.abs(__magic_name__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2
assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.693428650170972e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0_0_0_2 ) < 1e-3
def __UpperCAmelCase ( self ) -> Tuple:
if torch_device == "mps":
return
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**__magic_name__ )
scheduler.set_timesteps(self.num_inference_steps )
_a = self.dummy_model()
_a = self.dummy_sample_deter * scheduler.init_noise_sigma
_a = sample.to(__magic_name__ )
for i, t in enumerate(scheduler.timesteps ):
_a = scheduler.scale_model_input(__magic_name__ , __magic_name__ )
_a = model(__magic_name__ , __magic_name__ )
_a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ )
_a = output.prev_sample
_a = torch.sum(torch.abs(__magic_name__ ) )
_a = torch.mean(torch.abs(__magic_name__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
def __UpperCAmelCase ( self ) -> List[Any]:
if torch_device == "mps":
return
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**__magic_name__ )
scheduler.set_timesteps(self.num_inference_steps , device=__magic_name__ )
_a = self.dummy_model()
_a = self.dummy_sample_deter.to(__magic_name__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_a = scheduler.scale_model_input(__magic_name__ , __magic_name__ )
_a = model(__magic_name__ , __magic_name__ )
_a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ )
_a = output.prev_sample
_a = torch.sum(torch.abs(__magic_name__ ) )
_a = torch.mean(torch.abs(__magic_name__ ) )
if str(__magic_name__ ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
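# [illustrative sketch] Each test above runs the same denoising loop; here it
# is in isolation, with the UNet replaced by an identity stand-in so the
# snippet has no model dependency (the output is therefore meaningless -- the
# point is the scale_model_input / step call order):
import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1100)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = model_input  # stand-in for model(model_input, t).sample
    sample = scheduler.step(noise_pred, t, sample).prev_sample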
| 168 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling ( data: dict ) -> tuple:
    '''simple docstring'''
    return (data["data"], data["target"])
def xgboost ( features: np.ndarray , target: np.ndarray , test_features: np.ndarray ) -> np.ndarray:
    '''simple docstring'''
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main ( ) -> None:
    '''simple docstring'''
    housing = fetch_california_housing()
    data , target = data_handling(housing )
    x_train , x_test , y_train , y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(F"""Mean Absolute Error : {mean_absolute_error(y_test , predictions )}""" )
    print(F"""Mean Square Error : {mean_squared_error(y_test , predictions )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
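# [illustrative sketch] A small extension of the example above: the same
# regressor scored with k-fold cross-validation instead of a single split
# (cv=5 and the scoring string are illustrative choices, not part of the
# original example):
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import cross_val_score
from xgboost import XGBRegressor

housing = fetch_california_housing()
cv_scores = cross_val_score(
    XGBRegressor(verbosity=0, random_state=42),
    housing.data,
    housing.target,
    cv=5,
    scoring="neg_mean_absolute_error",
)
print(f"Mean CV MAE : {-cv_scores.mean():.4f}")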
| 250 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ : Union[str, Any] = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Dict = ['BlenderbotSmallTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : int = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[str] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : str = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
snake_case__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
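# [illustrative sketch] _LazyModule defers the real submodule imports until an
# exported name is first accessed. A minimal sketch of the same idea using a
# PEP 562 module-level __getattr__, written as the body of a hypothetical
# package __init__.py (the structure dict mirrors the one above):
import importlib

_import_structure = {"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"]}

def __getattr__(name):
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")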
| 250 | 1 |
def is_ip_va_address_valid ( ip_va_address : str ) -> bool:
    """simple docstring"""
    octets = [int(i ) for i in ip_va_address.split(""".""" ) if i.isdigit()]
    # every octet of a valid IPv4 address lies in the inclusive range 0-255
    return len(octets ) == 4 and all(0 <= octet <= 255 for octet in octets )
if __name__ == "__main__":
UpperCAmelCase_ : int = input().strip()
UpperCAmelCase_ : Any = '''valid''' if is_ip_va_address_valid(ip) else '''invalid'''
print(F'''{ip} is a {valid_or_invalid} IP v4 address.''')
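# [illustrative sketch] The standard library can serve as a cross-check for the
# validator above (note it is stricter, e.g. it rejects leading zeros):
from ipaddress import AddressValueError, IPv4Address

def is_ip_va_address_valid_stdlib(ip: str) -> bool:
    try:
        IPv4Address(ip)
        return True
    except AddressValueError:
        return False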
| 38 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform ( number_of_qubits : int = 3 ) -> qiskit.result.counts.Counts:
    """simple docstring"""
    if not isinstance(number_of_qubits , int ):
        raise TypeError("""number of qubits must be an integer.""" )
    if number_of_qubits <= 0:
        raise ValueError("""number of qubits must be > 0.""" )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError("""number of qubits must be an exact integer.""" )
    if number_of_qubits > 10:
        raise ValueError("""number of qubits too large to simulate (>10).""" )
    qr = QuantumRegister(number_of_qubits , """qr""" )
    cr = ClassicalRegister(number_of_qubits , """cr""" )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(number_of_qubits ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend("""qasm_simulator""" )
    job = execute(quantum_circuit , backend , shots=1_0000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
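# [illustrative sketch] On the all-zeros input state, the QFT produces a
# uniform superposition, so each of the 2**n bitstrings should be measured
# roughly shots / 2**n times; a quick statistical sanity check (the 5-sigma
# tolerance is an arbitrary illustrative choice):
if __name__ == "__main__":
    _counts = quantum_fourier_transform(3)
    _expected = 1_0000 / 2**3
    assert len(_counts) == 2**3
    assert all(abs(count - _expected) < 5 * _expected**0.5 for count in _counts.values())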
| 38 | 1 |
from __future__ import annotations
def slowsort (sequence : list , start : int | None = None , end : int | None = None ):
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence , start , end - 1 )
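# [illustrative sketch] Usage example for the in-place sort above. Slowsort is
# a deliberately inefficient "multiply and surrender" teaching algorithm, so
# keep inputs small:
if __name__ == "__main__":
    example = [5, 3, 8, 1, 9, 2]
    slowsort(example)
    assert example == [1, 2, 3, 5, 8, 9]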
if __name__ == "__main__":
from doctest import testmod
testmod() | 171 |
from math import sqrt
def solution (limit : int = 1_00_00_00 ):
    num_cuboids : int = 0
    max_cuboid_size : int = 0
    sum_shortest_sides : int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
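# [illustrative sketch] The increment above counts pairs (a, b) with
# 1 <= a <= b <= M and a + b = s in closed form as
# min(M, s // 2) - max(1, s - M) + 1. A direct brute-force cross-check for one
# illustrative (M, s):
if __name__ == "__main__":
    M, s = 10, 7
    pairs = [(a, s - a) for a in range(1, M + 1) if 1 <= s - a <= M and a <= s - a]
    assert len(pairs) == min(M, s // 2) - max(1, s - M) + 1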
if __name__ == "__main__":
print(F"""{solution() = }""") | 171 | 1 |
lowercase = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_9344,
"knot": 1.852,
}
lowercase = {
"km/h": 1.0,
"m/s": 0.2_7777_7778,
"mph": 0.6_2137_1192,
"knot": 0.5_3995_6803,
}
def convert_speed (speed : float , unit_from : str , unit_to : str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f'''Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'''
            f'''Valid values are: {", ".join(speed_chart_inverse)}'''
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3)
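# [illustrative sketch] Usage examples for the converter above (the function
# itself rounds to 3 decimals):
if __name__ == "__main__":
    assert convert_speed(100, "km/h", "m/s") == 27.778
    assert convert_speed(100, "km/h", "mph") == 62.137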
if __name__ == "__main__":
import doctest
doctest.testmod()
| 178 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self ) -> int:
snake_case_ = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
snake_case_ = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(a )
from datasets import load_dataset
snake_case_ = load_dataset('nielsr/rvlcdip-demo' )
snake_case_ = dataset['train'][0]['image'].convert('RGB' )
snake_case_ = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
snake_case_ = model(**a )
snake_case_ = outputs.logits
snake_case_ = torch.Size((1, 16) )
self.assertEqual(logits.shape , a )
snake_case_ = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=a , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , a , atol=1E-4 ) )
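# [illustrative sketch] The generic inference pattern the test above exercises,
# as a standalone sketch (the checkpoint name is taken from the test; the image
# path argument is a placeholder you would supply):
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

def classify_document(image_path: str) -> str:
    processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
    model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
    image = Image.open(image_path).convert("RGB")
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]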
| 178 | 1 |
"""simple docstring"""
import math
def res ( x : int, y : int) -> float:
    '''simple docstring'''
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0: # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1 # any number raised to 0 is 1
    raise AssertionError("This should never happen")
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
    prompt = 'Enter the base and the power separated by a comma: '
    x1 , y1 = map(int, input(prompt).split(','))
    x2 , y2 = map(int, input(prompt).split(','))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print('Largest number is', x1, '^', y1)
    elif res2 > res1:
        print('Largest number is', x2, '^', y2)
    else:
        print('Both are equal')
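    # [illustrative sketch] Quick sanity check of the comparison logic:
    # 2**10 = 1024 beats 10**3 = 1000, and indeed res(2, 10) = 10*log10(2)
    # ~ 3.01 exceeds res(10, 3) = 3:
    assert res(2, 10) > res(10, 3)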
| 144 |
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
_a = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
_a , _a = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('Cannot specify both BYO and on-demand cluster args')
_a = rh.cluster(
name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
)
else:
_a = rh.cluster(
name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
_a = args.example.rsplit('/', 1)[0]
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 144 | 1 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def lowerCamelCase__ ( a__ : Union[str, Any] , a__ : Dict , a__ : List[str] , a__ : List[str]=None , a__ : List[str]=None , a__ : Dict=None , a__ : Tuple=None , a__ : List[Any]=None , ) -> Optional[Any]:
if attention_mask is None:
UpperCamelCase_ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
UpperCamelCase_ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
UpperCamelCase_ = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=a__ )
if decoder_head_mask is None:
UpperCamelCase_ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=a__ )
if cross_attn_head_mask is None:
UpperCamelCase_ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=a__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class lowercase_ :
def __init__( self , __UpperCamelCase , __UpperCamelCase=1_3 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=9_9 , __UpperCamelCase=1_6 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=4 , __UpperCamelCase="relu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=2_0 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=0 , ):
"""simple docstring"""
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = encoder_layerdrop
UpperCamelCase_ = decoder_layerdrop
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = eos_token_id
UpperCamelCase_ = pad_token_id
UpperCamelCase_ = bos_token_id
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = self.eos_token_id # Eos Token
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
UpperCamelCase_ = input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase_ = decoder_input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase_ = self.get_config()
UpperCamelCase_ = prepare_mam_aaa_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def lowerCamelCase_ ( self ):
"""simple docstring"""
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = MaMaaaModel(config=__UpperCamelCase ).get_decoder().to(__UpperCamelCase ).eval()
UpperCamelCase_ = inputs_dict["""input_ids"""]
UpperCamelCase_ = inputs_dict["""attention_mask"""]
UpperCamelCase_ = inputs_dict["""head_mask"""]
# first forward pass
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , head_mask=__UpperCamelCase , use_cache=__UpperCamelCase )
UpperCamelCase_ , UpperCamelCase_ = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention mask
UpperCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase )["""last_hidden_state"""]
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[
"""last_hidden_state"""
]
# select random slice
UpperCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-2 ) )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = MaMaaaModel(config=__UpperCamelCase ).to(__UpperCamelCase ).eval()
UpperCamelCase_ = model(**__UpperCamelCase )
UpperCamelCase_ = outputs.encoder_last_hidden_state
UpperCamelCase_ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase_ = model.get_encoder()
encoder.save_pretrained(__UpperCamelCase )
UpperCamelCase_ = MaMaaaEncoder.from_pretrained(__UpperCamelCase ).to(__UpperCamelCase )
UpperCamelCase_ = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase_ = model.get_decoder()
decoder.save_pretrained(__UpperCamelCase )
UpperCamelCase_ = MaMaaaDecoder.from_pretrained(__UpperCamelCase ).to(__UpperCamelCase )
UpperCamelCase_ = decoder(
input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class lowercase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
A__ : Optional[Any] = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
A__ : Tuple = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
A__ : Union[str, Any] = (
{
"""conversational""": MaMaaaForConditionalGeneration,
"""feature-extraction""": MaMaaaModel,
"""summarization""": MaMaaaForConditionalGeneration,
"""text2text-generation""": MaMaaaForConditionalGeneration,
"""translation""": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
A__ : Union[str, Any] = True
A__ : List[Any] = True
A__ : Dict = False
A__ : Optional[int] = False
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = MaMaaaModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
UpperCamelCase_ , UpperCamelCase_ = model_class.from_pretrained(__UpperCamelCase , output_loading_info=__UpperCamelCase )
self.assertEqual(info["""missing_keys"""] , [] )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
UpperCamelCase_ = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ = copy.deepcopy(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
if not self.is_encoder_decoder:
UpperCamelCase_ = inputs["""input_ids"""]
del inputs["input_ids"]
else:
UpperCamelCase_ = inputs["""input_ids"""]
UpperCamelCase_ = inputs.get("""decoder_input_ids""" , __UpperCamelCase )
del inputs["input_ids"]
inputs.pop("""decoder_input_ids""" , __UpperCamelCase )
UpperCamelCase_ = model.get_input_embeddings()
if not self.is_encoder_decoder:
UpperCamelCase_ = wte(__UpperCamelCase )
else:
UpperCamelCase_ = wte(__UpperCamelCase )
UpperCamelCase_ = wte(__UpperCamelCase )
with torch.no_grad():
model(**__UpperCamelCase )[0]
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ = input_dict["""input_ids"""]
UpperCamelCase_ = input_ids.ne(1 ).to(__UpperCamelCase )
UpperCamelCase_ = MaMaaaForConditionalGeneration(__UpperCamelCase ).eval().to(__UpperCamelCase )
if torch_device == "cuda":
model.half()
model.generate(__UpperCamelCase , attention_mask=__UpperCamelCase )
model.generate(num_beams=4 , do_sample=__UpperCamelCase , early_stopping=__UpperCamelCase , num_return_sequences=3 )
def lowerCamelCase__ ( a__ : Union[str, Any] ) -> Union[str, Any]:
return torch.tensor(a__ , dtype=torch.long , device=a__ )
_A = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class lowercase_ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(__UpperCamelCase )
UpperCamelCase_ = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
UpperCamelCase_ = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
UpperCamelCase_ = prepare_mam_aaa_inputs_dict(model.config , __UpperCamelCase , __UpperCamelCase )
with torch.no_grad():
UpperCamelCase_ = model(**__UpperCamelCase )[0]
UpperCamelCase_ = torch.Size((1, 1_1, 1_0_2_4) )
self.assertEqual(output.shape , __UpperCamelCase )
# change to expected output here
UpperCamelCase_ = torch.tensor(
[[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]] , device=__UpperCamelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCamelCase , atol=__UpperCamelCase ) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(__UpperCamelCase )
# change to intended input
UpperCamelCase_ = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
UpperCamelCase_ = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
UpperCamelCase_ = prepare_mam_aaa_inputs_dict(model.config , __UpperCamelCase , __UpperCamelCase )
with torch.no_grad():
UpperCamelCase_ = model(**__UpperCamelCase )[0]
UpperCamelCase_ = torch.Size((1, 1_1, model.config.vocab_size) )
self.assertEqual(output.shape , __UpperCamelCase )
# change to expected output here
UpperCamelCase_ = torch.tensor(
[[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]] , device=__UpperCamelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCamelCase , atol=__UpperCamelCase ) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(__UpperCamelCase )
UpperCamelCase_ = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
UpperCamelCase_ = [
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
""" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
""" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
UpperCamelCase_ = tokenizer(__UpperCamelCase , padding=__UpperCamelCase , return_tensors="""pt""" )
UpperCamelCase_ = model.generate(
input_ids=dct["""input_ids"""].to(__UpperCamelCase ) , attention_mask=dct["""attention_mask"""].to(__UpperCamelCase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
UpperCamelCase_ = [
"""The NSA case highlights the total absence of intelligence debate""",
"""I think there are two levels of response from the French government.""",
"""When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
""" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
""" communications in France.""",
]
UpperCamelCase_ = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
assert generated == expected_en
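# [illustrative sketch] The end-to-end translation pattern the slow test above
# exercises, as a compact standalone sketch (model name and language codes are
# taken from the test; the input sentence is an arbitrary example):
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

def translate_fr_to_en(text: str) -> str:
    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
    model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    batch = tokenizer(text, return_tensors="pt")
    generated = model.generate(**batch, num_beams=5, forced_bos_token_id=tokenizer.get_lang_id("en"))
    return tokenizer.batch_decode(generated, skip_special_tokens=True)[0]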
| 122 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
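
# A short usage sketch for the tokenizer above. Assumes the "vinai/bartpho-syllable"
# checkpoint listed in PRETRAINED_VOCAB_FILES_MAP is downloadable and that `sentencepiece`
# is installed; the Vietnamese sample sentence is purely illustrative.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
    encoded = tokenizer("Chúng tôi là những nghiên cứu viên.")
    print(encoded["input_ids"])
    print(tokenizer.decode(encoded["input_ids"]))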
"""simple docstring"""
def solution() -> int:
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(f"""{solution() = }""")
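
# Worked check for the search above: the only Pythagorean triplet with a + b + c == 1000
# is (200, 375, 425), since 200**2 + 375**2 == 40000 + 140625 == 180625 == 425**2,
# so solution() must return 200 * 375 * 425 == 31875000.
if __name__ == "__main__":
    assert solution() == 31_875_000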
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
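
# Example invocation (all paths are placeholders; the script file name is an assumption
# based on the usual transformers repository layout):
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/output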
def bfs(graph, s, t, parent):
    """Returns True if there is a path from source `s` to sink `t` in the residual graph."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    """Computes the maximum flow from `source` to `sink`, using BFS to find augmenting paths."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
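
# Sanity notes on the driver above: the 6-node graph is the classic max-flow textbook
# example (source 0, sink 5), whose maximum flow is 23, so the print statement should
# output 23. A second, hand-checkable instance: a single edge of capacity c carries
# exactly c units of flow.
if __name__ == "__main__":
    assert ford_fulkerson([[0, 7], [0, 0]], 0, 1) == 7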
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
@property
def __a ( self : str ) -> Dict:
"""simple docstring"""
if self._dummy_file is None:
lowercase : Optional[Any] = self.download_dummy_data()
return self._dummy_file
@property
def __a ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def __a ( self : str ) -> int:
"""simple docstring"""
lowercase : str = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase : List[str] = cached_path(
_A , cache_dir=self.cache_dir , extract_compressed_file=_A , force_extract=_A )
return os.path.join(_A , self.dummy_file_name )
@property
def __a ( self : str ) -> Tuple:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if self._bucket_url is None:
lowercase : Optional[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def __a ( self : Tuple ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def __a ( self : Union[str, Any] , _A : Dict , *_A : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase : Union[str, Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase : Optional[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_A , _A ):
return self.create_dummy_data_dict(_A , _A )
elif isinstance(_A , (list, tuple) ):
return self.create_dummy_data_list(_A , _A )
else:
return self.create_dummy_data_single(_A , _A )
def __a ( self : str , _A : Union[str, Any] , *_A : Dict ) -> Dict:
"""simple docstring"""
return self.download_and_extract(_A )
def __a ( self : str , _A : List[str] , _A : Any ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_A )
def __a ( self : Optional[int] , _A : Tuple , *_A : str , **_A : Any ) -> Optional[Any]:
"""simple docstring"""
return path
def __a ( self : List[str] ) -> str:
"""simple docstring"""
return {}
def __a ( self : List[str] , _A : Union[str, Any] , _A : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase : Any = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_A , _A ):
for single_url in single_urls:
download_callback(_A )
else:
lowercase : List[str] = single_urls
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_A , _A ):
lowercase : int = [os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) ) for x in single_urls]
else:
lowercase : int = single_urls
lowercase : Any = os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) )
lowercase : str = value
# make sure that values are unique
if all(isinstance(_A , _A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase : str = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __a ( self : Optional[int] , _A : List[Any] , _A : Tuple ) -> Tuple:
"""simple docstring"""
lowercase : Optional[Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowercase : Union[str, Any] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , _A ) ) for url in data_url )
lowercase : str = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase : List[str] = [data_url[0]] * len(_A )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Optional[int] = os.path.join(_A , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(_A )
return dummy_data_list
def __a ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ) -> List[str]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Dict = os.path.join(_A , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(_A ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
def __a ( self : Any ) -> Dict:
"""simple docstring"""
pass
def __a ( self : int , _A : Optional[Any] ) -> Dict:
"""simple docstring"""
def _iter_archive_members(_A : Optional[int] ):
# this preserves the order of the members inside the ZIP archive
lowercase : int = Path(self.dummy_file ).parent
lowercase : List[str] = path.relative_to(_A )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_A )
lowercase : Tuple = Path(_A )
lowercase : List[Any] = _iter_archive_members(_A ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(_A ).as_posix(), file_path.open('''rb''' )
def __a ( self : Optional[Any] , _A : Dict ) -> Union[str, Any]:
"""simple docstring"""
if not isinstance(_A , _A ):
lowercase : Dict = [paths]
for path in paths:
if os.path.isfile(_A ):
if os.path.basename(_A ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_A ):
if os.path.basename(_A ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(_A ):
if filename.startswith(('''.''', '''__''') ):
continue
                    yield os.path.join(dirpath, filename)
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class TaConfig(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class TaOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
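
# A minimal sketch of building the config above and inspecting its ONNX input spec.
# Everything here is constructed in memory, so no checkpoint download is needed; the
# `TaOnnxConfig(config, task=...)` constructor signature is assumed from the
# transformers.onnx OnnxConfig API.
if __name__ == "__main__":
    config = TaConfig(d_model=64, d_ff=128, num_layers=2, num_heads=4)
    onnx_config = TaOnnxConfig(config, task="seq2seq-lm")
    print(onnx_config.inputs)  # dynamic-axes spec for encoder and decoder inputs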
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary"""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks whether `key` or `(model_prefix,) + key` is in random_flax_state_dict"""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
'''simple docstring'''
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_UpperCamelCase )
UpperCamelCase__ = {}
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
'''simple docstring'''
import torch
# Load the index
UpperCamelCase__ = {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCamelCase__ = torch.load(_UpperCamelCase )
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
if "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda _UpperCamelCase : x.dtype == jnp.bfloataa , _UpperCamelCase ) ).values()
if any(_UpperCamelCase ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
UpperCamelCase__ = jax.tree_util.tree_map(
lambda _UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCamelCase )
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = pt_model.state_dict()
UpperCamelCase__ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase__ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase__ = []
UpperCamelCase__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
# conv layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
# linear layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCamelCase__ = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase__ = ".".join(_UpperCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase__ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase__ = key.split("." )
UpperCamelCase__ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase__ = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase__ = key_components[-2] + "_v"
if name is not None:
UpperCamelCase__ = key_components[:-3] + [name]
UpperCamelCase__ = ".".join(_UpperCamelCase )
UpperCamelCase__ = key
if flax_key in special_pt_names:
UpperCamelCase__ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
UpperCamelCase__ = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
# remove from missing keys
missing_keys.remove(_UpperCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_UpperCamelCase )
pt_model.load_state_dict(_UpperCamelCase )
# re-transform missing_keys to list
UpperCamelCase__ = list(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_UpperCamelCase ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
" use it for predictions and inference." )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"If your task is similar to the task the model of the checkpoint was trained on, "
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
    return pt_model
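
# These converters are normally reached through `from_pretrained` rather than called
# directly. A minimal round-trip sketch (assumes network access and that both torch and
# flax are installed; "bert-base-uncased" is just a convenient small checkpoint):
if __name__ == "__main__":
    from transformers import BertModel, FlaxBertModel

    flax_model = FlaxBertModel.from_pretrained("bert-base-uncased", from_pt=True)  # PyTorch -> Flax
    flax_model.save_pretrained("./bert-flax")
    pt_model = BertModel.from_pretrained("./bert-flax", from_flax=True)  # Flax -> PyTorch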
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
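
# The reader above is the internal machinery behind the public text loader. The
# equivalent user-facing call (assumes a local file ./data.txt with one example per
# line; the path is a placeholder):
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("text", data_files={"train": "./data.txt"})["train"]
    print(dataset[0]["text"])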
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('''SD: Done: ONNX''')
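
# Example invocation (placeholder paths; the script file name is an assumption).
# `--model_path` must point to a diffusers-format checkpoint containing a `vae/`
# subfolder, since convert_models loads `model_path + "/vae"`:
#
#   python convert_vae_to_onnx.py \
#       --model_path ./stable-diffusion-v1-5 \
#       --output_path ./sd-onnx \
#       --opset 14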
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_SCREAMING_SNAKE_CASE , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = AutoencoderKL()
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ) -> str:
"""simple docstring"""
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipe(**_SCREAMING_SNAKE_CASE ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_UpperCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3 )
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
_UpperCAmelCase = ['vase', 'umbrella', 'white shark', 'white wolf']
_UpperCAmelCase = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = load_numpy(
f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
_UpperCAmelCase = ['vase', 'umbrella']
_UpperCAmelCase = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
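
# A short usage sketch for the processor above. Assumes the "speechbrain/m-ctc-t-large"
# reference checkpoint is downloadable; the zero array stands in for one second of
# 16 kHz audio supplied by the caller.
if __name__ == "__main__":
    import numpy as np

    from transformers import MCTCTProcessor

    processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
    audio = np.zeros(16_000, dtype=np.float32)
    inputs = processor(audio=audio, sampling_rate=16_000, return_tensors="pt")
    print(inputs["input_features"].shape)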
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
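
# Example invocation (placeholder paths; the script file name is an assumption based on
# the diffusers repository layout). Converts an original-format checkpoint into a
# diffusers pipeline directory; the flags mirror the argparse definitions above:
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --dump_path ./stable-diffusion-v1-5 \
#       --extract_ema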
'''simple docstring'''
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
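
# Complexity note: get_failure_array is O(len(pattern)) and the scan in kmp is
# O(len(text)), because `i` never moves backwards and `j` only falls back through the
# failure table, giving O(n + m) overall versus O(n * m) for the naive approach.
if __name__ == "__main__":
    # Two extra hand-checkable cases:
    assert kmp("aba", "cabac")
    assert not kmp("abc", "ababab")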
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
UpperCAmelCase = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
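
# The shim above exists only for backward compatibility; new code should use the image
# processor class directly. A minimal sketch (assumes the "openai/imagegpt-small"
# checkpoint is downloadable):
if __name__ == "__main__":
    from transformers import ImageGPTImageProcessor

    image_processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
    print(type(image_processor).__name__)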
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def lowercase ( a__ : list[list[int]] ) -> list[list[int]]:
_UpperCamelCase = []
for i in range(len(a__ ) ):
_UpperCamelCase = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
_UpperCamelCase = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(a__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(a__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(a__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
_UpperCamelCase = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(a__ )
return next_generation
def lowercase ( a__ : list[list[int]] , a__ : int ) -> list[Image.Image]:
_UpperCamelCase = []
for _ in range(a__ ):
# Create output image
_UpperCamelCase = Image.new('''RGB''' , (len(cells[0] ), len(a__ )) )
_UpperCamelCase = img.load()
# Save cells to image
for x in range(len(a__ ) ):
for y in range(len(cells[0] ) ):
_UpperCamelCase = 255 - cells[y][x] * 255
_UpperCamelCase = (colour, colour, colour)
# Save image
images.append(a__ )
_UpperCamelCase = new_generation(a__ )
return images
if __name__ == "__main__":
UpperCAmelCase = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
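
    # Illustrative check (not in the original file): a blinker oscillates with
    # period 2, so two generations return it to its starting state.
    assert new_generation(new_generation(BLINKER)) == BLINKER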
| 54 | 0 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # Each state dimension must be divisible by its head width (the original
        # check accidentally compared a value against itself).
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
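

# Illustrative usage (assumes an installed `transformers` build that ships ESM;
# not part of the original file):
#
#   from transformers import EsmConfig
#   config = EsmConfig(vocab_size=33, mask_token_id=32, pad_token_id=1)
#   print(config.num_hidden_layers)  # 12 by default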
| 65 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
# Construct model
if gpta_config_file == "":
__lowerCAmelCase: Optional[int] = GPTaConfig()
else:
__lowerCAmelCase: List[str] = GPTaConfig.from_json_file(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = GPTaModel(__SCREAMING_SNAKE_CASE )
# Load weights from numpy
load_tf_weights_in_gpta(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save pytorch-model
__lowerCAmelCase: str = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
__lowerCAmelCase: List[Any] = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE )
print(F"Save configuration file to {pytorch_config_dump_path}" )
with open(__SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
__A = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
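
# Example invocation (file names are placeholders, not from the original script):
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path ./model.ckpt --pytorch_dump_folder_path ./gpt2-pt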
| 217 | 0 |
"""Tokenization classes for GPTNeoXJapanese."""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}


def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content

    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
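
# Illustrative usage (downloads the `abeja/gpt-neox-japanese-2.7b` vocabulary;
# not part of the original file):
#
#   from transformers import GPTNeoXJapaneseTokenizer
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("こんにちは、世界")["input_ids"]
#   print(tokenizer.decode(ids))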
| 264 |
"""Implementation of the Möbius function, https://en.wikipedia.org/wiki/M%C3%B6bius_function."""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    """Return mu(n): 0 if n has a squared prime factor, otherwise
    (-1) ** (number of prime factors)."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
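
    # Illustrative values (not in the original file):
    assert mobius(7) == -1  # single prime factor -> odd count
    assert mobius(10) == 1  # 2 * 5 -> even count of distinct primes
    assert mobius(12) == 0  # 12 = 2**2 * 3 is not square-free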
| 264 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
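
# Illustrative usage (a minimal sketch; the shapes below are assumptions, not
# from the original file):
#
#   from diffusers import UNet1DModel
#   model = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
#   sample = torch.randn(1, 2, 65536)
#   out = model(sample, timestep=10).sample  # same shape as the input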
| 51 |
"""Feature extractor class for Speech2Text."""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: bool = True,
        normalize_vars: bool = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(self, input_features, attention_mask=None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
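
# Illustrative usage (a minimal sketch; the checkpoint name is an assumption,
# not from the original file):
#
#   from transformers import Speech2TextFeatureExtractor
#   extractor = Speech2TextFeatureExtractor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   features = extractor([np.zeros(16000, dtype=np.float32)], sampling_rate=16000, return_tensors="pt")
#   print(features.input_features.shape)  # (batch, frames, num_mel_bins)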
| 55 | 0 |
from math import sqrt


def solution(limit: int = 1000000) -> int:
    """Project Euler 86: smallest cuboid dimension M such that the number of
    cuboids with an integer shortest surface path first exceeds ``limit``."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
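
    # Illustrative check of the integer-shortest-path test used above (not in
    # the original file): a 6x5x3 cuboid has shortest surface path
    # sqrt((5 + 3) ** 2 + 6 ** 2) = 10, an integer.
    assert sqrt((5 + 3) ** 2 + 6**2).is_integer()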
| 262 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
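

# Illustrative usage (not part of the original file):
#
#   config = ViTMSNConfig(image_size=224, patch_size=16)
#   assert config.image_size // config.patch_size == 14  # 14x14 patch grid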
| 262 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict


def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.58_20, -8.72_31, -8.32_15], [-8.06_00, -10.35_29, -10.03_04], [-7.52_08, -9.41_03, -9.62_39]],
[[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]],
[[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]],
[[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]],
[[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.08_78, -10.20_81, -10.18_91], [-9.31_44, -10.79_41, -10.98_43], [-9.22_94, -10.38_55, -10.57_04]],
[[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]],
[[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]],
[[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]],
[[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.55_24, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.58_42, -12.88_51, -13.94_14]],
[[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]],
[[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]],
[[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]],
[[0.53_74, 0.10_67, -0.47_42], [0.11_41, -0.22_55, -0.70_99], [-0.30_00, -0.59_24, -1.31_05]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.82_17, -9.87_67, -10.17_17], [-9.44_38, -10.90_58, -11.40_47], [-9.79_39, -12.34_95, -12.10_79]],
[[-7.15_14, -9.53_36, -10.08_60], [-9.77_76, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]],
[[0.30_21, 0.08_05, -0.23_10], [-0.03_28, -0.16_05, -0.27_14], [-0.14_08, -0.54_77, -0.69_76]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.13_72E01, -1.27_87E01, -1.34_77E01],
[-1.25_36E01, -1.41_94E01, -1.44_09E01],
[-1.32_17E01, -1.48_88E01, -1.53_27E01],
],
[
[-1.47_91E01, -1.71_22E01, -1.82_77E01],
[-1.71_63E01, -1.91_92E01, -1.95_33E01],
[-1.78_97E01, -1.99_91E01, -2.03_15E01],
],
[
[7.67_23E-01, 4.19_21E-01, -7.78_78E-02],
[4.77_72E-01, 9.55_57E-03, -2.80_82E-01],
[3.60_32E-01, -2.48_26E-01, -5.11_68E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.49_59, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]],
[[-9.89_05, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]],
[[0.22_13, 0.01_92, -0.24_66], [-0.17_31, -0.42_13, -0.48_74], [-0.31_26, -0.65_41, -1.13_89]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]],
[[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]],
[[-4.51_78, -5.50_37, -6.51_09], [-5.08_84, -7.21_74, -8.03_34], [-4.41_56, -5.81_17, -7.29_70]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]],
[[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]],
[[-4.73_49, -4.95_88, -5.09_66], [-4.32_10, -6.93_25, -7.25_91], [-3.43_12, -4.74_84, -7.19_17]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]],
[[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]],
[[1.04_91, 0.82_89, 1.03_10], [1.10_44, 0.52_19, 0.80_55], [1.08_99, 0.69_26, 0.55_90]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]],
[[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]],
[[-1.79_90, -2.09_51, -1.77_84], [-2.63_97, -3.82_45, -3.96_86], [-1.52_64, -2.81_26, -2.93_16]],
] )
else:
        predicted_class_idx = logits.argmax(-1).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
# verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="segformer.b0.512x512.ade.160k",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
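
# Example invocation (paths are placeholders, not from the original script):
#
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0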
| 338 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
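
# Example invocation (paths are placeholders, not from the original script):
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./qa.ckpt \
#       --pytorch_dump_folder_path ./longformer-qa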
| 17 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
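

# Illustrative usage (not part of the original file):
#
#   config = IBertConfig(quant_mode=True)
#   print(config.force_dequant)  # "none" by default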
| 360 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
def lowerCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = {}
if prefix is not None:
_snake_case = prefix
if prefix:
_snake_case = self.tokenizer(
lowerCAmelCase_ , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
' [None, \'hole\']' )
_snake_case = handle_long_generation
preprocess_params.update(lowerCAmelCase_ )
_snake_case = generate_kwargs
_snake_case = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.TENSORS
if return_type is not None:
_snake_case = return_type
if clean_up_tokenization_spaces is not None:
_snake_case = clean_up_tokenization_spaces
if stop_sequence is not None:
_snake_case = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
_snake_case = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
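    # Illustrative summary of the flags resolved above (behaviour restated, no new logic):
    #   return_full_text=True  -> ReturnType.FULL_TEXT  (prompt + continuation)
    #   return_full_text=False -> ReturnType.NEW_TEXT   (continuation only)
    #   return_tensors=True    -> ReturnType.TENSORS    (raw generated token ids)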
def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]:
return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_="" , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Any:
_snake_case = self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prompt_text
if handle_long_generation == "hole":
_snake_case = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
_snake_case = generate_kwargs['max_new_tokens']
else:
_snake_case = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_snake_case = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
                    raise ValueError(
                        'We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'
                        ' model\'s max length' )
_snake_case = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
_snake_case = inputs['attention_mask'][:, -keep_length:]
return inputs
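    # Worked example for the "hole" handling above (numbers are illustrative):
    #   a 900-token prompt, max_new_tokens=200, tokenizer.model_max_length=1024
    #   -> keep_length = 1024 - 200 = 824, so only the last 824 prompt tokens are kept.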
def lowerCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = model_inputs['input_ids']
_snake_case = model_inputs.get('attention_mask' , lowerCAmelCase_ )
# Allow empty prompts
if input_ids.shape[1] == 0:
_snake_case = None
_snake_case = None
_snake_case = 1
else:
_snake_case = input_ids.shape[0]
_snake_case = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_snake_case = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
_snake_case = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
_snake_case = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_snake_case = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_snake_case = self.model.generate(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = generated_sequence.shape[0]
if self.framework == "pt":
_snake_case = generated_sequence.reshape(lowerCAmelCase_ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_snake_case = tf.reshape(lowerCAmelCase_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=ReturnType.FULL_TEXT , lowerCAmelCase_=True ) -> int:
_snake_case = model_outputs['generated_sequence'][0]
_snake_case = model_outputs['input_ids']
_snake_case = model_outputs['prompt_text']
_snake_case = generated_sequence.numpy().tolist()
_snake_case = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_snake_case = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_snake_case = self.tokenizer.decode(
lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_snake_case = 0
else:
_snake_case = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) )
if return_type == ReturnType.FULL_TEXT:
_snake_case = prompt_text + text[prompt_length:]
else:
_snake_case = text[prompt_length:]
_snake_case = {'generated_text': all_text}
records.append(lowerCAmelCase_ )
return records
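# Hedged usage sketch (illustrative; relies only on the public `pipeline` factory):
#     from transformers import pipeline
#     generator = pipeline('text-generation', model='gpt2')
#     out = generator("Hello, I'm a language model,", max_new_tokens=20)
#     print(out[0]['generated_text'])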
| 295 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def _snake_case ( snake_case__ : List[Any] , snake_case__ : Union[str, Any]=False , snake_case__ : Union[str, Any]=False , snake_case__ : List[str]=False ):
A = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'transformer.blocks.{i}.norm1.weight', F'vilt.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'transformer.blocks.{i}.norm1.bias', F'vilt.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'transformer.blocks.{i}.attn.proj.weight', F'vilt.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'transformer.blocks.{i}.attn.proj.bias', F'vilt.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'transformer.blocks.{i}.norm2.weight', F'vilt.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'transformer.blocks.{i}.norm2.bias', F'vilt.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(F'transformer.blocks.{i}.mlp.fc1.weight', F'vilt.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'transformer.blocks.{i}.mlp.fc1.bias', F'vilt.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'transformer.blocks.{i}.mlp.fc2.weight', F'vilt.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'transformer.blocks.{i}.mlp.fc2.bias', F'vilt.encoder.layer.{i}.output.dense.bias') )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def _snake_case ( snake_case__ : str , snake_case__ : List[str] ):
for i in range(config.num_hidden_layers ):
A = 'vilt.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A = state_dict.pop(F'transformer.blocks.{i}.attn.qkv.weight' )
A = state_dict.pop(F'transformer.blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
A = in_proj_weight[
: config.hidden_size, :
]
A = in_proj_bias[: config.hidden_size]
A = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A = in_proj_weight[
-config.hidden_size :, :
]
A = in_proj_bias[-config.hidden_size :]
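# Shape sketch for the q/k/v split above (H = config.hidden_size, introduced here for clarity):
#   in_proj_weight: (3H, H) -> rows 0:H are the query, H:2H the key, 2H:3H the value
#   in_proj_bias:   (3H,)   -> split into three length-H chunks the same way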
def _snake_case ( snake_case__ : Optional[int] ):
A = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def _snake_case ( snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] ):
A = dct.pop(snake_case__ )
A = val
@torch.no_grad()
def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : str ):
A = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=snake_case__ )
A = False
A = False
A = False
A = False
if "vqa" in checkpoint_url:
A = True
A = 3129
A = 'huggingface/label-files'
A = 'vqa2-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
A = {int(snake_case__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
A = ViltForQuestionAnswering(snake_case__ )
elif "nlvr" in checkpoint_url:
A = True
A = 2
A = {0: 'False', 1: 'True'}
A = {v: k for k, v in config.idalabel.items()}
A = 3
A = ViltForImagesAndTextClassification(snake_case__ )
elif "irtr" in checkpoint_url:
A = True
A = ViltForImageAndTextRetrieval(snake_case__ )
elif "mlm_itm" in checkpoint_url:
A = True
A = ViltForMaskedLM(snake_case__ )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
A = torch.hub.load_state_dict_from_url(snake_case__ , map_location='cpu' )['state_dict']
A = create_rename_keys(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
read_in_q_k_v(snake_case__ , snake_case__ )
if mlm_model or irtr_model:
A = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
A , A = model.load_state_dict(snake_case__ , strict=snake_case__ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(snake_case__ )
# Define processor
A = ViltImageProcessor(size=384 )
A = BertTokenizer.from_pretrained('bert-base-uncased' )
A = ViltProcessor(snake_case__ , snake_case__ )
# Forward pass on example inputs (image + text)
if nlvr_model:
A = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=snake_case__ ).raw )
A = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=snake_case__ ).raw )
A = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
A = processor(snake_case__ , snake_case__ , return_tensors='pt' )
A = processor(snake_case__ , snake_case__ , return_tensors='pt' )
A = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
A = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=snake_case__ ).raw )
if mlm_model:
A = 'a bunch of [MASK] laying on a [MASK].'
else:
A = 'How many cats are there?'
A = processor(snake_case__ , snake_case__ , return_tensors='pt' )
A = model(**snake_case__ )
# Verify outputs
if mlm_model:
A = torch.Size([1, 11, 3_0522] )
A = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , snake_case__ , atol=1e-4 )
# verify masked token prediction equals "cats"
A = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
A = torch.Size([1, 3129] )
A = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 )
# verify vqa prediction equals "2"
A = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
A = torch.Size([1, 2] )
A = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowercase = parser.parse_args()
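    # Example invocation (illustrative; the script name is an assumption, the URL is the default above):
    #   python convert_vilt_original_to_pytorch.py \
    #       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
    #       --pytorch_dump_folder_path ./vilt-b32-mlm-itm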
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 74 |
"""simple docstring"""
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    # Real (active) power P = S * pf; the power factor must lie in [-1, 1].
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    # Reactive power Q = S * sqrt(1 - pf^2); same validity check on the power factor.
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
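    # Illustrative check (example values are assumptions): 100 VA at power factor 0.9
    print(real_power(100, 0.9))      # 90.0 W
    print(reactive_power(100, 0.9))  # ~43.59 VAR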
| 98 | 0 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
__SCREAMING_SNAKE_CASE : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
snake_case_ = model_type_to_module_name(_SCREAMING_SNAKE_CASE )
snake_case_ = importlib.import_module(f""".{module_name}""" , """transformers.models""" )
try:
return getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(_SCREAMING_SNAKE_CASE , """__name__""" , _SCREAMING_SNAKE_CASE ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
snake_case_ = importlib.import_module("""transformers""" )
if hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return None
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
snake_case_ = get_file_from_repo(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , )
if resolved_config_file is None:
logger.info(
"""Could not locate the feature extractor configuration file, will try to use the model config instead.""" )
return {}
with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as reader:
return json.load(_SCREAMING_SNAKE_CASE )
class __A :
'''simple docstring'''
def __init__( self : str ) ->List[Any]:
"""simple docstring"""
raise EnvironmentError(
"""AutoFeatureExtractor is designed to be instantiated """
"""using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(UpperCAmelCase_ )
def lowerCAmelCase ( cls : Any , UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = kwargs.pop("""config""" , UpperCAmelCase_ )
snake_case_ = kwargs.pop("""trust_remote_code""" , UpperCAmelCase_ )
snake_case_ = True
snake_case_ , snake_case_ = FeatureExtractionMixin.get_feature_extractor_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
snake_case_ = config_dict.get("""feature_extractor_type""" , UpperCAmelCase_ )
snake_case_ = None
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
snake_case_ = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = AutoConfig.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
            # It could be in `config.feature_extractor_type`
snake_case_ = getattr(UpperCAmelCase_ , """feature_extractor_type""" , UpperCAmelCase_ )
if hasattr(UpperCAmelCase_ , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map:
snake_case_ = config.auto_map["""AutoFeatureExtractor"""]
if feature_extractor_class is not None:
snake_case_ = feature_extractor_class_from_name(UpperCAmelCase_ )
snake_case_ = feature_extractor_auto_map is not None
snake_case_ = feature_extractor_class is not None or type(UpperCAmelCase_ ) in FEATURE_EXTRACTOR_MAPPING
snake_case_ = resolve_trust_remote_code(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if has_remote_code and trust_remote_code:
snake_case_ = get_class_from_dynamic_module(
UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
snake_case_ = kwargs.pop("""code_revision""" , UpperCAmelCase_ )
if os.path.isdir(UpperCAmelCase_ ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(UpperCAmelCase_ ) in FEATURE_EXTRACTOR_MAPPING:
snake_case_ = FEATURE_EXTRACTOR_MAPPING[type(UpperCAmelCase_ )]
return feature_extractor_class.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
raise ValueError(
F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def lowerCAmelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Any ) ->List[Any]:
"""simple docstring"""
FEATURE_EXTRACTOR_MAPPING.register(UpperCAmelCase_ , UpperCAmelCase_ )
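# Hedged usage sketch (illustrative): the auto class resolves the extractor from the
# checkpoint's config via the mapping at the top of this module, e.g.
#     from transformers import AutoFeatureExtractor
#     fe = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')
#     # -> a Wav2Vec2FeatureExtractor instance, per the ('wav2vec2', ...) entry above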
| 233 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: List[Any] = CycleDiffusionPipeline
__lowercase: int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""negative_prompt""",
"""height""",
"""width""",
"""negative_prompt_embeds""",
}
__lowercase: str = PipelineTesterMixin.required_optional_params - {"""latents"""}
__lowercase: Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""source_prompt"""})
__lowercase: Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowercase: Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase ( self : Dict ) ->Any:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
snake_case_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , num_train_timesteps=1_000 , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
snake_case_ = CLIPTextModel(UpperCAmelCase_ )
snake_case_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase ( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str]=0 ) ->str:
"""simple docstring"""
snake_case_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
snake_case_ = image / 2 + 0.5
if str(UpperCAmelCase_ ).startswith("""mps""" ):
snake_case_ = torch.manual_seed(UpperCAmelCase_ )
else:
snake_case_ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
snake_case_ = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = CycleDiffusionPipeline(**UpperCAmelCase_ )
snake_case_ = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = self.get_dummy_inputs(UpperCAmelCase_ )
snake_case_ = pipe(**UpperCAmelCase_ )
snake_case_ = output.images
snake_case_ = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
snake_case_ = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def lowerCAmelCase ( self : Union[str, Any] ) ->str:
"""simple docstring"""
snake_case_ = self.get_dummy_components()
for name, module in components.items():
if hasattr(UpperCAmelCase_ , """half""" ):
snake_case_ = module.half()
snake_case_ = CycleDiffusionPipeline(**UpperCAmelCase_ )
snake_case_ = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = self.get_dummy_inputs(UpperCAmelCase_ )
snake_case_ = pipe(**UpperCAmelCase_ )
snake_case_ = output.images
snake_case_ = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
snake_case_ = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
return super().test_inference_batch_single_identical()
@skip_mps
def lowerCAmelCase ( self : Tuple ) ->List[Any]:
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCAmelCase ( self : Any ) ->Tuple:
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : List[Any] ) ->Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
snake_case_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
snake_case_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
snake_case_ = init_image.resize((512, 512) )
snake_case_ = """CompVis/stable-diffusion-v1-4"""
snake_case_ = DDIMScheduler.from_pretrained(UpperCAmelCase_ , subfolder="""scheduler""" )
snake_case_ = CycleDiffusionPipeline.from_pretrained(
UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
snake_case_ = """A black colored car"""
snake_case_ = """A blue colored car"""
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=UpperCAmelCase_ , source_prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=UpperCAmelCase_ , output_type="""np""" , )
snake_case_ = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
snake_case_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
snake_case_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
snake_case_ = init_image.resize((512, 512) )
snake_case_ = """CompVis/stable-diffusion-v1-4"""
snake_case_ = DDIMScheduler.from_pretrained(UpperCAmelCase_ , subfolder="""scheduler""" )
snake_case_ = CycleDiffusionPipeline.from_pretrained(UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
snake_case_ = """A black colored car"""
snake_case_ = """A blue colored car"""
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=UpperCAmelCase_ , source_prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=UpperCAmelCase_ , output_type="""np""" , )
snake_case_ = output.images
assert np.abs(image - expected_image ).max() < 2E-2
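# Note (illustrative): the @slow tests above only run when slow tests are enabled, e.g.
#   RUN_SLOW=1 pytest tests/pipelines/stable_diffusion/test_cycle_diffusion.py
# (the exact test path is an assumption; RUN_SLOW gating is the standard transformers convention)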
| 233 | 1 |
from ...processing_utils import ProcessorMixin
class _lowerCamelCase( _a ):
lowercase_ : List[Any] = """WhisperFeatureExtractor"""
lowercase_ : List[str] = """WhisperTokenizer"""
def __init__( self, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
super().__init__(lowerCamelCase, lowerCamelCase)
_lowercase : Dict = self.feature_extractor
_lowercase : int = False
def UpperCamelCase ( self, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=True) -> List[str]:
"""simple docstring"""
return self.tokenizer.get_decoder_prompt_ids(task=lowerCamelCase, language=lowerCamelCase, no_timestamps=lowerCamelCase)
def __call__( self, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase, **lowerCamelCase)
_lowercase : Any = kwargs.pop('audio', lowerCamelCase)
_lowercase : Union[str, Any] = kwargs.pop('sampling_rate', lowerCamelCase)
_lowercase : List[str] = kwargs.pop('text', lowerCamelCase)
if len(lowerCamelCase) > 0:
_lowercase : int = args[0]
_lowercase : Union[str, Any] = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.')
if audio is not None:
_lowercase : Dict = self.feature_extractor(lowerCamelCase, *lowerCamelCase, sampling_rate=lowerCamelCase, **lowerCamelCase)
if text is not None:
_lowercase : Dict = self.tokenizer(lowerCamelCase, **lowerCamelCase)
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowercase : Union[str, Any] = encodings['input_ids']
return inputs
def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCamelCase, **lowerCamelCase)
def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
return self.tokenizer.decode(*lowerCamelCase, **lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase="np") -> str:
"""simple docstring"""
return self.tokenizer.get_prompt_ids(lowerCamelCase, return_tensors=lowerCamelCase)
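# Hedged usage sketch (illustrative; the checkpoint name is a common public one):
#     from transformers import WhisperProcessor
#     processor = WhisperProcessor.from_pretrained('openai/whisper-tiny')
#     inputs = processor(audio=waveform, sampling_rate=16000, return_tensors='pt')
#     labels = processor(text='hello world').input_ids
#     # `waveform` is assumed to be a 1-D float array sampled at 16 kHz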
| 21 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Tuple=13 , _lowerCAmelCase : List[str]=7 , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Any=True , _lowerCAmelCase : str=99 , _lowerCAmelCase : List[str]=32 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : List[str]=4 , _lowerCAmelCase : str=37 , _lowerCAmelCase : Any="gelu" , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Optional[int]=512 , _lowerCAmelCase : Union[str, Any]=16 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : Union[str, Any]=None , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_input_mask
SCREAMING_SNAKE_CASE_ = use_token_type_ids
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = num_choices
SCREAMING_SNAKE_CASE_ = scope
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : Optional[int] ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = LlamaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , ):
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = LlamaModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , ):
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] , ):
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE_ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = outputs.past_key_values
        # create a batch of hypothetical next tokens and extend next_input_ids
SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE_ = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE_ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )['hidden_states'][0]
SCREAMING_SNAKE_CASE_ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE_ = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def lowerCAmelCase_ ( self : List[str] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowercase_ = (LlamaForCausalLM,) if is_torch_available() else ()
lowercase_ = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = LlamaModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_ = type
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ = input_ids.ne(1 ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = LlamaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 'single_label_classification'
SCREAMING_SNAKE_CASE_ = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ = input_ids.ne(1 ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = LlamaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 'multi_label_classification'
SCREAMING_SNAKE_CASE_ = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ = input_ids.ne(1 ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE_ = LlamaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def lowerCAmelCase_ ( self : int ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Tuple ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = ids_tensor([1, 10] , config.vocab_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ = LlamaModel(_lowerCAmelCase )
original_model.to(_lowerCAmelCase )
original_model.eval()
SCREAMING_SNAKE_CASE_ = original_model(_lowerCAmelCase ).last_hidden_state
SCREAMING_SNAKE_CASE_ = original_model(_lowerCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE_ = LlamaModel(_lowerCAmelCase )
scaled_model.to(_lowerCAmelCase )
scaled_model.eval()
SCREAMING_SNAKE_CASE_ = scaled_model(_lowerCAmelCase ).last_hidden_state
SCREAMING_SNAKE_CASE_ = scaled_model(_lowerCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) )
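    # Illustrative note on the check above: with {'type': 'linear', 'factor': 10.0} every
    # position p is rotated as if it were p / 10, so even short inputs diverge from the
    # unscaled model; dynamic NTK scaling leaves RoPE untouched until the input exceeds
    # the original max_position_embeddings, hence the allclose() for the short input.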
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
SCREAMING_SNAKE_CASE_ = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
SCREAMING_SNAKE_CASE_ = model(torch.tensor(_lowerCAmelCase ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
SCREAMING_SNAKE_CASE_ = model(torch.tensor(_lowerCAmelCase ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1E-5 , rtol=1E-5 )
    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
SCREAMING_SNAKE_CASE_ = model(torch.tensor(_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 )
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
SCREAMING_SNAKE_CASE_ = 'Simply put, the theory of relativity states that '
SCREAMING_SNAKE_CASE_ = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_lowerCAmelCase , return_tensors='pt' )
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=_lowerCAmelCase )
# greedy generation outputs
SCREAMING_SNAKE_CASE_ = model.generate(_lowerCAmelCase , max_new_tokens=64 , top_p=_lowerCAmelCase , temperature=1 , do_sample=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = tokenizer.decode(generated_ids[0] , skip_special_tokens=_lowerCAmelCase )
        self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
| 225 | 0 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    """Pipeline for value-guided sampling from a diffusion model trained to predict state trajectories."""

    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        # overwrite the conditioned timesteps (here: the current state) in every trajectory
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
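if __name__ == "__main__":
    # Usage sketch (not part of the pipeline module): requires `gym` + `d4rl` and a trained
    # value-function/unet checkpoint on the Hub. The env name and repo id below are the ones
    # used in the diffusers RL example, but treat them as assumptions.
    import d4rl  # noqa: F401  (registers the offline-RL gym environments)
    import gym

    from diffusers.experimental import ValueGuidedRLPipeline

    env = gym.make("hopper-medium-v2")
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32", env=env
    )
    obs = env.reset()
    for _ in range(10):
        action = pipeline(obs, planning_horizon=32, n_guide_steps=2, scale=0.1)
        obs, reward, done, info = env.step(action)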
| 76 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[int, int]): range to sample the target short-edge length from.
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs


class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
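if __name__ == "__main__":
    # Usage sketch (not part of the original module): the research project builds `cfg` from a
    # Detectron2-style config shipped with the FRCNN checkpoint; the `Config` helper and the
    # checkpoint id below are assumptions based on that project.
    from utils import Config  # assumed sibling helper in the same research project

    cfg = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")
    preprocess = Preprocess(cfg)
    images, sizes, scales_yx = preprocess("demo.jpg", single_image=True)  # path is a placeholder
    print(images.shape, sizes, scales_yx)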
| 76 | 1 |
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
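# Minimal usage sketch (not one of the tests above): FileLock serializes writers across
# processes by taking an OS-level lock on a sibling ".lock" file.
def append_line_exclusively(path: str, line: str) -> None:
    lock = FileLock(path + ".lock")
    with lock.acquire(timeout=5):  # raises Timeout if another process holds the lock too long
        with open(path, "a") as f:
            f.write(line + "\n")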
| 5 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices return that as the new
    default level. Otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
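if __name__ == "__main__":
    # Usage sketch (not part of the module): how downstream code typically drives these helpers.
    set_verbosity_info()  # equivalent to set_verbosity(log_levels["info"])
    logger = get_logger(__name__)
    logger.info("effective verbosity: %s", get_verbosity())

    disable_progress_bar()  # tqdm(...) now yields a no-op EmptyTqdm
    for _ in tqdm(range(3)):
        pass
    enable_progress_bar()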
| 149 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_data2vec_audio import (
        DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
        Data2VecAudioForAudioFrameClassification,
        Data2VecAudioForCTC,
        Data2VecAudioForSequenceClassification,
        Data2VecAudioForXVector,
        Data2VecAudioModel,
        Data2VecAudioPreTrainedModel,
    )
    from .modeling_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
        Data2VecTextForCausalLM,
        Data2VecTextForMaskedLM,
        Data2VecTextForMultipleChoice,
        Data2VecTextForQuestionAnswering,
        Data2VecTextForSequenceClassification,
        Data2VecTextForTokenClassification,
        Data2VecTextModel,
        Data2VecTextPreTrainedModel,
    )
    from .modeling_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
        Data2VecVisionForImageClassification,
        Data2VecVisionForMaskedImageModeling,
        Data2VecVisionForSemanticSegmentation,
        Data2VecVisionModel,
        Data2VecVisionPreTrainedModel,
    )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
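if __name__ == "__main__":
    # Behavior sketch (assumes an installed `transformers`): _LazyModule defers the heavy
    # torch/TF imports until an attribute is first touched, so importing the package stays cheap.
    from transformers.models import data2vec

    config = data2vec.Data2VecTextConfig(hidden_size=768)  # triggers the lazy submodule import
    print(type(config).__name__)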
| 359 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
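if __name__ == "__main__":
    # Usage sketch (assumes installed `transformers` + `sentencepiece` and the commonly used
    # NLLB checkpoint below).
    from transformers import NllbTokenizer

    tokenizer = NllbTokenizer.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    print(tokenizer("Hello world")["input_ids"])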
| 152 | 0 |
"""A radix tree (compressed trie) implementation."""
from __future__ import annotations


class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf

        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the prefix of the node and a word."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1

        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
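def demo_prefix_splitting() -> None:
    # Extra sketch (not in the original file): inserting "test" then "tester" reuses the "test"
    # node; inserting "team" afterwards splits it into "te" -> {"st" -> {"er"}, "am"}.
    root = RadixNode()
    root.insert_many(["test", "tester", "team"])
    root.print_tree()
    assert root.find("team") and root.find("tester") and not root.find("te")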
| 229 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    """The output of [`FlaxUNet2DConditionModel`]: the denoised sample."""

    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
'''simple docstring'''
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(reversed_block_out_channels) - 1)]

            is_final_block = i == len(reversed_block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
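if __name__ == "__main__":
    # Usage sketch (assumes installed `diffusers` + `flax`; this module itself only imports as
    # part of the package because of its relative imports). Shapes follow the defaults above
    # (4 latent channels, 32x32 samples, cross-attention dim 1280).
    from diffusers import FlaxUNet2DConditionModel as HubFlaxUNet

    unet = HubFlaxUNet(sample_size=32, in_channels=4, out_channels=4)
    params = unet.init_weights(jax.random.PRNGKey(0))
    sample = jnp.zeros((1, 4, 32, 32))  # NCHW latents
    encoder_hidden_states = jnp.zeros((1, 77, 1280))  # e.g. text-encoder hidden states
    out = unet.apply({"params": params}, sample, jnp.array([10]), encoder_hidden_states)
    print(out.sample.shape)  # (1, 4, 32, 32)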
| 229 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    r"""Constructs a Speech2Text processor which wraps a feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
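if __name__ == "__main__":
    # Usage sketch (assumes an installed `transformers` and the hub checkpoint below): `audio=`
    # is routed to the feature extractor and `text=` to the tokenizer.
    import numpy as np

    from transformers import Speech2TextProcessor

    processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
    speech = np.zeros(16_000, dtype=np.float32)  # 1 second of silence at 16 kHz
    inputs = processor(audio=speech, sampling_rate=16_000, text="HELLO", return_tensors="pt")
    print(inputs["input_features"].shape, inputs["labels"])
| 232 |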
from __future__ import annotations

test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth first search to order the vertices of `graph` by finish time."""
    visited[vert] = True
    order = []

    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)

    order.append(vert)

    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth first search on the reversed graph to collect one strongly connected component."""
    visited[vert] = True
    component = [vert]

    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)

    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: return the list of strongly connected components of `graph`."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
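if __name__ == "__main__":
    # Usage sketch on the graphs defined at the top of the file. For `test_graph_2` the
    # strongly connected components are [0, 1, 2] and [3, 4, 5] (possibly in another order).
    print(strongly_connected_components(test_graph_1))
    print(strongly_connected_components(test_graph_2))
| 232 | 1 |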
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    """Configuration class to store the configuration of a [`DetrModel`]."""

    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a [`DetrConfig`] (or a derived class) from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
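if __name__ == "__main__":
    # Usage sketch (assumes an installed `transformers`; this module itself only imports as
    # part of the package because of its relative imports).
    from transformers import DetrConfig as HubDetrConfig

    config = HubDetrConfig(num_queries=50, encoder_layers=4, decoder_layers=4)
    print(config.model_type, config.num_attention_heads, config.hidden_size)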
| 100 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
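# Example invocation sketch (the script filename and all paths below are placeholders):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path /path/to/model.pkl \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin
| 63 | 0 |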
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """Configuration class to store the configuration of an `MMBTModel`."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 63 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase__ = {F"funnel-transformer/{name}": 512 for name in _model_names}
lowerCamelCase__ = {F"funnel-transformer/{name}": {'''do_lower_case''': True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" Funnel Transformer tokenizer (backed by HuggingFace's `tokenizers` library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
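if __name__ == "__main__":
    # Usage sketch (assumes installed `transformers` + `tokenizers`; the checkpoint id comes
    # from the pretrained map above). Funnel marks the leading <cls> with token type id 2.
    from transformers import FunnelTokenizerFast as HubFunnelTokenizerFast

    tokenizer = HubFunnelTokenizerFast.from_pretrained("funnel-transformer/small")
    enc = tokenizer("Hello world", "Second segment")
    print(enc["input_ids"])
    print(enc["token_type_ids"])  # starts with 2 for <cls>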
| 63 | 1 |
def nand_gate(input_1: int, input_2: int) -> int:
    """Output is 0 only when both inputs are 1; otherwise 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
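# Extra sketch (not in the original file): NAND is functionally complete, so the other basic
# gates can be derived from it.
def not_gate(input_1: int) -> int:
    return nand_gate(input_1, input_1)


def and_gate(input_1: int, input_2: int) -> int:
    return nand_gate(nand_gate(input_1, input_2), nand_gate(input_1, input_2))


def or_gate(input_1: int, input_2: int) -> int:
    return nand_gate(nand_gate(input_1, input_1), nand_gate(input_2, input_2))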
| 0 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class lowercase_ :
'''simple docstring'''
def __init__( self : Optional[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : list[list[int]] , __UpperCAmelCase : list[list[int]] , ) ->None:
"""simple docstring"""
a = claim_vector
a = allocated_resources_table
a = maximum_claim_table
def __lowerCAmelCase ( self : Any ) ->list[int]:
"""simple docstring"""
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __lowerCAmelCase ( self : Optional[int] ) ->list[int]:
"""simple docstring"""
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __lowerCAmelCase ( self : Union[str, Any] ) ->list[list[int]]:
"""simple docstring"""
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__UpperCAmelCase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __lowerCAmelCase ( self : Tuple ) ->dict[int, list[int]]:
"""simple docstring"""
return {self.__need().index(__UpperCAmelCase ): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        """simple docstring"""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"""Process {process_number + 1} is executing.""")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources]))
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
"""simple docstring"""
print(''' ''' * 9 + '''Allocated Resource Table''' )
for item in self.__allocated_resources_table:
print(
F"""P{self.__allocated_resources_table.index(__UpperCAmelCase ) + 1}"""
+ ''' '''.join(F"""{it:>8}""" for it in item )
+ '''\n''' )
print(''' ''' * 9 + '''System Resource Table''' )
for item in self.__maximum_claim_table:
print(
F"""P{self.__maximum_claim_table.index(__UpperCAmelCase ) + 1}"""
+ ''' '''.join(F"""{it:>8}""" for it in item )
+ '''\n''' )
print(
'''Current Usage by Active Processes: '''
            + ''' '''.join(str(x) for x in self.__claim_vector ) )
print(
'''Initial Available Resources: '''
            + ''' '''.join(str(x) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
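# Illustrative usage (added; not part of the original file), reusing the
# module-level test tables defined above:
if __name__ == "__main__":
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)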
| 0 | 1 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def train_command_factory(args: Namespace):
    """simple docstring"""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
"""simple docstring"""
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data", type=str, required=True, help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.", )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels." )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts." )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids." )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)." )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split", type=float, default=0.1, help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.", )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on." )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model." )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training." )
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation." )
        train_parser.add_argument("--learning_rate", type=float, default=3E-5, help="Learning rate." )
        train_parser.add_argument("--adam_epsilon", type=float, default=1E-0_8, help="Epsilon for Adam optimizer." )
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"""Loading {args.task} pipeline for {args.model}""")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"""Loading dataset from {args.train_data}""")
        self.train_dataset = Processor.create_from_csv(
            args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"""Loading validation dataset from {args.validation_data}""")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
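# Illustrative wiring sketch (added; not part of the original file). A hypothetical
# top-level CLI would register and dispatch the subcommand like this:
#   parser = ArgumentParser("transformers-cli")
#   commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
#   TrainCommand.register_subcommand(commands_parser)
#   args = parser.parse_args(["train", "--train_data", "train.csv"])  # hypothetical csv path
#   args.func(args).run()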
| 355 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    """simple docstring"""
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def reshape_weight_for_sd(w):
    """simple docstring"""
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    """simple docstring"""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"""mid.attn_1.{weight_name}.weight""" in k:
                print(f"""Reshaping {k} for SD format""")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    """simple docstring"""
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    """simple docstring"""
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
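# Example invocation (illustrative; the script filename and paths are hypothetical,
# the flags are the ones defined above):
#   python convert_diffusers_to_sd.py --model_path ./diffusers-model \
#       --checkpoint_path ./sd_model.safetensors --use_safetensors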
| 121 | 0 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def snake_case_ ( lowerCAmelCase_ = "https://www.worldometers.info/coronavirus" )-> dict:
'''simple docstring'''
_UpperCAmelCase : str = BeautifulSoup(requests.get(lowerCAmelCase_ ).text , """html.parser""" )
_UpperCAmelCase : List[str] = soup.findAll("""h1""" )
_UpperCAmelCase : List[Any] = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
return {key.text.strip(): value.text.strip() for key, value in zip(lowerCAmelCase_ , lowerCAmelCase_ )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
    for key, value in world_covid19_stats().items():
print(f"""{key}\n{value}\n""")
| 215 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
"""simple docstring"""
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1_000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names[2:], feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", )
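# Illustrative usage sketch (added; not part of the test file): the processor routes
# text to the tokenizer and raw audio to the feature extractor in a single call.
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=floats_list((1, 1_000)), return_tensors="np")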
| 215 | 1 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf")
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
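# Minimal standalone sketch of the pipeline under test (added; the image path is hypothetical):
#   classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cats.png", candidate_labels=["cat", "plane", "remote"])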
| 353 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 21 | 0 |
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    '''simple docstring'''
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    '''simple docstring'''
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    '''simple docstring'''
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        _ = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)
    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()
        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)
    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)
            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)
            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)
            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)
            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)
            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)
            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)
            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)
    def test_accelerator_none(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj)
        self.assertTrue(dummy_obj is None)
    def test_is_accelerator_prepared(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj)
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False), False, "Dummy object should have `_is_accelerate_prepared` set to `True`", )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False), True, "Model is missing `_is_accelerator_prepared` or is set to `False`", )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False), True, "Optimizer is missing `_is_accelerator_prepared` or is set to `False`", )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False), True, "Scheduler is missing `_is_accelerator_prepared` or is set to `False`", )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False), True, "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`", )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False), True, "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`", )
    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map={"": 0}, )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m", )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True)

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)
    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m", )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()
    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m", )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(model)
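# Illustrative training-loop sketch for the API exercised above (added; not in the test file):
#   accelerator = Accelerator()
#   model, optimizer, scheduler, train_dl, valid_dl = accelerator.prepare(*create_components())
#   for batch in train_dl:
#       loss = model(batch[0].float()).sum()
#       accelerator.backward(loss)
#       optimizer.step(); scheduler.step(); optimizer.zero_grad()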
| 168 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        '''simple docstring'''
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
    @require_cuda
    def test_grad_scaler_kwargs(self):
        '''simple docstring'''
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_ddp_kwargs(self):
        '''simple docstring'''
        cmd = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 1_5:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 310 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class AlbertConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
"""simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
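# Illustrative usage (added; not in the original file):
#   config = AlbertConfig()          # the defaults above correspond to an xxlarge-style model
#   config.hidden_size               # 4096
#   onnx_config = AlbertOnnxConfig(config, task="multiple-choice")
#   onnx_config.inputs               # OrderedDict with batch/choice/sequence dynamic axes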
| 166 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''

    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        '''simple docstring'''
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        '''simple docstring'''
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        '''simple docstring'''
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        '''simple docstring'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        '''simple docstring'''
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        '''simple docstring'''
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        '''simple docstring'''
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_time_indices(self):
        '''simple docstring'''
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        '''simple docstring'''
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        '''simple docstring'''
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        '''simple docstring'''
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        '''simple docstring'''
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        '''simple docstring'''
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        '''simple docstring'''
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
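# Illustrative denoising-loop sketch mirroring full_loop above (added; `unet` and
# `sample` are hypothetical stand-ins for a real model and noisy latent):
#   scheduler = DDIMParallelScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(10)
#   for t in scheduler.timesteps:
#       residual = unet(sample, t)
#       sample = scheduler.step(residual, t, sample, eta=0.0).prev_sample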
| 166 | 1 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    '''simple docstring'''
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    '''simple docstring'''
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    '''simple docstring'''
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    '''simple docstring'''
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    '''simple docstring'''
    # Convert message to bit string, with padding and length appended
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
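
    # Hypothetical smoke test (assumes the helper names used in this rewrite):
    # the MD5 digest of the empty message is a well-known constant.
    assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"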
| 36 |
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for `bit_count` bits as integers."""
    if bit_count < 0:
        raise ValueError("The given input must be non-negative")
    # Generate the sequence as binary strings, then convert each to an integer
    sequence = gray_code_sequence_string(bit_count)
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Recursively build the `bit_count`-bit Gray code sequence as binary strings."""
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # 1 << n is equivalent to 2**n
    # The recursive call generates the sequence for n - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # Prepend 0 to the first half of the smaller sequence
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # Prepend 1 to the second half, traversed in reverse order
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
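
    # Quick sanity check (hypothetical): the 2-bit Gray code is 00, 01, 11, 10.
    assert gray_code(2) == [0, 1, 3, 2]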
| 36 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
lowercase_ = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on


class NllbTokenizerFast(PreTrainedTokenizerFast):
    """Fast NLLB tokenizer, backed by HuggingFace's `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by translation pipelines to prepare inputs for the generate method."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
def __UpperCAmelCase ( self , _a , _a = "eng_Latn" , _a = None , _a = "fra_Latn" , **_a , ):
__a = src_lang
__a = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens to the source-language setting.

        In legacy mode the prefix is empty and the suffix is [eos, src_lang_code];
        otherwise the prefix is [src_lang_code] and the suffix is [eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset special tokens to the target-language setting, mirroring the source case."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
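

# A minimal usage sketch (hypothetical; assumes the `transformers` package is
# installed and the checkpoint can be downloaded):
#
#     tokenizer = NllbTokenizerFast.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     batch = tokenizer("UN Chief says there is no military solution in Syria")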
| 11 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
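
# With the lazy module installed in sys.modules, heavy backends load only on
# first attribute access. Hypothetical usage (assumes this file is the
# EfficientFormer package __init__ inside transformers):
#
#     from transformers import EfficientFormerConfig  # no torch import yet
#     config = EfficientFormerConfig()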
| 11 | 1 |