code (string, lengths 87-55.2k) | code_codestyle (int64, 0-349) | style_context (string, lengths 135-49.1k) | style_context_codestyle (int64, 0-349) | label (int64, 0-1)
---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase=None ) -> List[Any]:
_a = data
_a = None
def __repr__( self ) -> Union[str, Any]:
_a = []
_a = self
while temp:
string_rep.append(F'{temp.data}' )
_a = temp.next
return "->".join(__UpperCAmelCase )
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
if not elements_list:
raise Exception('''The Elements List is empty''' )
_a = _a = Node(elements_list[0] )
for i in range(1, len(_lowerCAmelCase ) ):
_a = Node(elements_list[i] )
_a = current.next
return head
def A_ ( _lowerCAmelCase : Node ):
"""simple docstring"""
if head_node is not None and isinstance(_lowerCAmelCase, _lowerCAmelCase ):
print_reverse(head_node.next )
print(head_node.data )
def A_ ( ):
"""simple docstring"""
from doctest import testmod
testmod()
_a = make_linked_list([14, 52, 14, 12, 43] )
print('''Linked List:''' )
print(_lowerCAmelCase )
print('''Elements in Reverse:''' )
print_reverse(_lowerCAmelCase )
if __name__ == "__main__":
main()
| 320 |
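The sample above builds a singly linked list from a Python list and prints it in reverse by recursing to the tail before printing. A cleaned-up sketch of the same idea follows; the identifiers (`Node`, `make_linked_list`, `print_reverse`) are my own readable names, not the ones used in the row.

```python
from __future__ import annotations


class Node:
    """A single node of a singly linked list."""

    def __init__(self, data=None) -> None:
        self.data = data
        self.next: Node | None = None

    def __repr__(self) -> str:
        # Walk the list and join the stored values with "->".
        parts, current = [], self
        while current:
            parts.append(f"{current.data}")
            current = current.next
        return "->".join(parts)


def make_linked_list(elements: list) -> Node:
    """Build a linked list from a non-empty Python list and return its head."""
    if not elements:
        raise Exception("The Elements List is empty")
    head = current = Node(elements[0])
    for value in elements[1:]:
        current.next = Node(value)
        current = current.next
    return head


def print_reverse(node: Node | None) -> None:
    """Recurse to the tail first, then print each element on the way back up."""
    if isinstance(node, Node):
        print_reverse(node.next)
        print(node.data)


if __name__ == "__main__":
    head = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:", head)
    print("Elements in Reverse:")
    print_reverse(head)
```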
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowerCamelCase ( a__ ):
'''simple docstring'''
@require_torch
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_a = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(__UpperCAmelCase )
BertModel.from_pretrained(__UpperCAmelCase )
BertTokenizer.from_pretrained(__UpperCAmelCase )
pipeline(task='''fill-mask''' , model=__UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_a = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_a = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(__UpperCAmelCase )
BertModel.from_pretrained(__UpperCAmelCase )
BertTokenizer.from_pretrained(__UpperCAmelCase )
pipeline(task='''fill-mask''' , model=__UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
_a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
_a = '''
from transformers import pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
_a = self.get_env()
_a = '''1'''
_a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
_a = '''
from transformers import AutoModel
'''
_a = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
| 320 | 1 |
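The test above exercises `TRANSFORMERS_OFFLINE` by launching a child Python process whose `socket.socket` is monkey-patched to raise, so any attempted network access fails immediately while cached files still load. A minimal, library-agnostic sketch of that pattern (the child snippet here just prints `success`; in the real test it loads the cached model and pipeline):

```python
import os
import subprocess
import sys

# Child-process snippets: disable networking first, then run the code under test.
mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled")
socket.socket = offline_socket
"""

run = """
print("success")  # stand-in for code that must work without network access
"""

cmd = [sys.executable, "-c", "\n".join([mock, run])]
env = {**os.environ, "TRANSFORMERS_OFFLINE": "1"}  # tell transformers to rely on the local cache
result = subprocess.run(cmd, env=env, check=False, capture_output=True)
assert result.returncode == 0, result.stderr
assert "success" in result.stdout.decode()
```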
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def A_ ( _lowerCAmelCase : Dict="ro", _lowerCAmelCase : List[Any]="en", _lowerCAmelCase : str="wmt16", _lowerCAmelCase : Dict=None ):
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
_a = f'{src_lang}-{tgt_lang}'
print(f'Converting {dataset}-{pair}' )
_a = datasets.load_dataset(_lowerCAmelCase, _lowerCAmelCase )
if save_dir is None:
_a = f'{dataset}-{pair}'
_a = Path(_lowerCAmelCase )
save_dir.mkdir(exist_ok=_lowerCAmelCase )
for split in ds.keys():
print(f'Splitting {split} with {ds[split].num_rows} records' )
# to save to val.source, val.target like summary datasets
_a = '''val''' if split == '''validation''' else split
_a = save_dir.joinpath(f'{fn}.source' )
_a = save_dir.joinpath(f'{fn}.target' )
_a = src_path.open('''w+''' )
_a = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
_a = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'Saved {dataset} dataset to {save_dir}' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 320 |
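The row above defines `download_wmt_dataset`, which flattens a Hugging Face translation dataset into parallel `.source`/`.target` files, one sentence per line. Since `fire.Fire` exposes the function's keyword arguments as command-line flags, a typical invocation would look like the comment below; the script filename is an assumption, as it is not given in the source.

```python
# Assuming the sample is saved as download_wmt_dataset.py:
#
#   python download_wmt_dataset.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
#
# For wmt16 ro-en this would write, per split (validation is renamed to val):
#   wmt16-ro-en/train.source  wmt16-ro-en/train.target
#   wmt16-ro-en/val.source    wmt16-ro-en/val.target
#   wmt16-ro-en/test.source   wmt16-ro-en/test.target
```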
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : str = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Any = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Dict = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Tuple = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Any = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(cls , ['''flax'''] )
| 320 | 1 |
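The long run of near-identical classes above is the dummy-object pattern: when an optional backend (here `flax`) is missing, every public entry point is replaced by a placeholder that raises a descriptive error instead of failing obscurely at import time. A simplified sketch of the mechanism; `requires_backends` and `DummyObject` below are stand-ins written for illustration, not the library's exact implementations.

```python
import importlib.util


def requires_backends(obj, backends):
    """Raise ImportError naming whichever required backends are not installed."""
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")


class DummyObject(type):
    """Metaclass: looking up an unknown public attribute on the class reports the missing backend."""

    def __getattr__(cls, key):
        if key.startswith("_"):
            raise AttributeError(key)
        requires_backends(cls, cls._backends)
        raise AttributeError(key)  # backend is present but the attribute still does not exist


class FlaxSomeModel(metaclass=DummyObject):  # hypothetical placeholder class
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)
```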
"""simple docstring"""
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True , __UpperCAmelCase = False ) -> List[str]:
_a = scheduler
_a = optimizers if isinstance(__UpperCAmelCase , (list, tuple) ) else [optimizers]
_a = split_batches
_a = step_with_optimizer
_a = GradientState()
def _UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*__UpperCAmelCase , **__UpperCAmelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*__UpperCAmelCase , **__UpperCAmelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
_a = AcceleratorState().num_processes
for _ in range(__UpperCAmelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*__UpperCAmelCase , **__UpperCAmelCase )
else:
self.scheduler.step(*__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
return self.scheduler.get_last_lr()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return self.scheduler.state_dict()
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
self.scheduler.load_state_dict(__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Dict:
return self.scheduler.get_lr()
def _UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
return self.scheduler.print_lr(*__UpperCAmelCase , **__UpperCAmelCase )
| 320 |
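The wrapper above (Accelerate's scheduler shim) only advances the learning-rate scheduler when the optimizer actually stepped, and compensates for data parallelism by stepping once per process unless batches were split. A stripped-down sketch of that control flow, with plain constructor arguments standing in for `AcceleratorState` and `GradientState`:

```python
class SimpleAcceleratedScheduler:
    """Step the wrapped LR scheduler only when training really advanced."""

    def __init__(self, scheduler, optimizers, num_processes=1, split_batches=False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.num_processes = num_processes
        self.split_batches = split_batches

    def step(self, *args, **kwargs):
        # If any optimizer skipped its step (e.g. inf/nan gradients under AMP), keep the LR unchanged.
        if any(getattr(opt, "step_was_skipped", False) for opt in self.optimizers):
            return
        if self.split_batches:
            # The dataloader batch size was not scaled, so one scheduler step per training step.
            self.scheduler.step(*args, **kwargs)
        else:
            # The effective batch size was multiplied by num_processes, so step that many times.
            for _ in range(self.num_processes):
                self.scheduler.step(*args, **kwargs)
```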
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__snake_case = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__snake_case = '''\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
'''
__snake_case = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
def remove_articles(_lowerCAmelCase : Optional[int] ):
_a = re.compile(R'''\b(a|an|the)\b''', re.UNICODE )
return re.sub(_lowerCAmelCase, ''' ''', _lowerCAmelCase )
def white_space_fix(_lowerCAmelCase : Tuple ):
return " ".join(text.split() )
def remove_punc(_lowerCAmelCase : Tuple ):
_a = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowerCAmelCase : List[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) )
def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) )
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Any ):
"""simple docstring"""
_a = [any(compute_exact(_lowerCAmelCase, _lowerCAmelCase ) for ref in refs ) for pred, refs in zip(_lowerCAmelCase, _lowerCAmelCase )]
return (sum(_lowerCAmelCase ) / len(_lowerCAmelCase )) * 1_00
def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : List[Any], _lowerCAmelCase : str, _lowerCAmelCase : str ):
"""simple docstring"""
_a = [rgram for rgrams in rgramslist for rgram in rgrams]
_a = Counter(_lowerCAmelCase )
_a = Counter(_lowerCAmelCase )
_a = Counter()
for sgram, scount in sgramcounter.items():
_a = scount * numref
_a = Counter(_lowerCAmelCase )
_a = Counter()
for cgram, ccount in cgramcounter.items():
_a = ccount * numref
# KEEP
_a = sgramcounter_rep & cgramcounter_rep
_a = keepgramcounter_rep & rgramcounter
_a = sgramcounter_rep & rgramcounter
_a = 0
_a = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(_lowerCAmelCase ) > 0:
_a = keeptmpscorea / len(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_a = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_a = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_a = sgramcounter_rep - cgramcounter_rep
_a = delgramcounter_rep - rgramcounter
_a = sgramcounter_rep - rgramcounter
_a = 0
_a = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
if len(_lowerCAmelCase ) > 0:
_a = deltmpscorea / len(_lowerCAmelCase )
# ADDITION
_a = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
_a = set(_lowerCAmelCase ) & set(_lowerCAmelCase )
_a = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
_a = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(_lowerCAmelCase ) > 0:
_a = addtmpscore / len(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
_a = addtmpscore / len(_lowerCAmelCase )
_a = 0
if addscore_precision > 0 or addscore_recall > 0:
_a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Dict, _lowerCAmelCase : Any ):
"""simple docstring"""
_a = len(_lowerCAmelCase )
_a = ssent.split(''' ''' )
_a = csent.split(''' ''' )
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
for rsent in rsents:
_a = rsent.split(''' ''' )
_a = []
_a = []
_a = []
ragramslist.append(_lowerCAmelCase )
for i in range(0, len(_lowerCAmelCase ) - 1 ):
if i < len(_lowerCAmelCase ) - 1:
_a = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 2:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 3:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(_lowerCAmelCase )
ragramslist.append(_lowerCAmelCase )
ragramslist.append(_lowerCAmelCase )
ragramslist.append(_lowerCAmelCase )
for i in range(0, len(_lowerCAmelCase ) - 1 ):
if i < len(_lowerCAmelCase ) - 1:
_a = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 2:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 3:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(_lowerCAmelCase )
for i in range(0, len(_lowerCAmelCase ) - 1 ):
if i < len(_lowerCAmelCase ) - 1:
_a = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 2:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 3:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(_lowerCAmelCase )
((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
_a = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_a = sum([delascore, delascore, delascore, delascore] ) / 4
_a = sum([addascore, addascore, addascore, addascore] ) / 4
_a = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : bool = True, _lowerCAmelCase : str = "13a", _lowerCAmelCase : bool = True ):
"""simple docstring"""
if lowercase:
_a = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_a = sacrebleu.metrics.bleu._get_tokenizer(_lowerCAmelCase )()(_lowerCAmelCase )
else:
_a = sacrebleu.TOKENIZERS[tokenizer]()(_lowerCAmelCase )
elif tokenizer == "moses":
_a = sacremoses.MosesTokenizer().tokenize(_lowerCAmelCase, return_str=_lowerCAmelCase, escape=_lowerCAmelCase )
elif tokenizer == "penn":
_a = sacremoses.MosesTokenizer().penn_tokenize(_lowerCAmelCase, return_str=_lowerCAmelCase )
else:
_a = sentence
if not return_str:
_a = normalized_sent.split()
return normalized_sent
def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
if not (len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == len(_lowerCAmelCase )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
_a = 0
for src, pred, refs in zip(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ):
sari_score += SARIsent(normalize(_lowerCAmelCase ), normalize(_lowerCAmelCase ), [normalize(_lowerCAmelCase ) for sent in refs] )
_a = sari_score / len(_lowerCAmelCase )
return 1_00 * sari_score
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Tuple, _lowerCAmelCase : Any="exp", _lowerCAmelCase : Tuple=None, _lowerCAmelCase : Union[str, Any]=False, _lowerCAmelCase : Optional[Any]=False, _lowerCAmelCase : List[str]=False, ):
"""simple docstring"""
_a = len(references[0] )
if any(len(_lowerCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_a = [[refs[i] for refs in references] for i in range(_lowerCAmelCase )]
_a = sacrebleu.corpus_bleu(
_lowerCAmelCase, _lowerCAmelCase, smooth_method=_lowerCAmelCase, smooth_value=_lowerCAmelCase, force=_lowerCAmelCase, lowercase=_lowerCAmelCase, use_effective_order=_lowerCAmelCase, )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
_a = {}
result.update({'''sari''': compute_sari(sources=__UpperCAmelCase , predictions=__UpperCAmelCase , references=__UpperCAmelCase )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=__UpperCAmelCase , references=__UpperCAmelCase )} )
result.update({'''exact''': compute_em(predictions=__UpperCAmelCase , references=__UpperCAmelCase )} )
return result
| 320 | 1 |
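The metric above combines SARI, sacrebleu and exact match. The exact-match component is the simplest to isolate: both strings are normalised (lowercased, punctuation and English articles removed, whitespace collapsed) and then compared. A self-contained sketch mirroring that normalisation order:

```python
import re
import string


def normalize_answer(text: str) -> str:
    """Lowercase, strip punctuation, drop English articles, collapse whitespace."""
    text = text.lower()
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())


def compute_exact(prediction: str, reference: str) -> int:
    return int(normalize_answer(prediction) == normalize_answer(reference))


print(compute_exact("The cat sat.", "a cat sat"))  # 1: only case, articles and punctuation differ
```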
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A_ ( _lowerCAmelCase : Optional[int], _lowerCAmelCase : List[str] ):
"""simple docstring"""
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : str, _lowerCAmelCase : Any ):
"""simple docstring"""
_a = tmp_path / '''cache'''
_a = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_a = JsonDatasetReader(_lowerCAmelCase, cache_dir=_lowerCAmelCase, keep_in_memory=_lowerCAmelCase ).read()
_check_json_dataset(_lowerCAmelCase, _lowerCAmelCase )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : Any, _lowerCAmelCase : int ):
"""simple docstring"""
_a = tmp_path / '''cache'''
_a = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_a = features.copy() if features else default_expected_features
_a = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_a = JsonDatasetReader(_lowerCAmelCase, features=_lowerCAmelCase, cache_dir=_lowerCAmelCase ).read()
_check_json_dataset(_lowerCAmelCase, _lowerCAmelCase )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
], )
def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : Optional[Any], _lowerCAmelCase : int ):
"""simple docstring"""
_a = tmp_path / '''cache'''
_a = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
_a = features.copy() if features else default_expected_features
_a = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_a = JsonDatasetReader(_lowerCAmelCase, features=_lowerCAmelCase, cache_dir=_lowerCAmelCase ).read()
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : Any ):
"""simple docstring"""
_a = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
_a = features.copy()
_a = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_a = tmp_path / '''cache'''
_a = JsonDatasetReader(_lowerCAmelCase, features=_lowerCAmelCase, cache_dir=_lowerCAmelCase ).read()
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Optional[int], _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_a = tmp_path / '''cache'''
_a = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_a = JsonDatasetReader(_lowerCAmelCase, cache_dir=_lowerCAmelCase, split=_lowerCAmelCase ).read()
_check_json_dataset(_lowerCAmelCase, _lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''', [str, list] )
def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Optional[int], _lowerCAmelCase : List[str] ):
"""simple docstring"""
if issubclass(_lowerCAmelCase, _lowerCAmelCase ):
_a = jsonl_path
elif issubclass(_lowerCAmelCase, _lowerCAmelCase ):
_a = [jsonl_path]
_a = tmp_path / '''cache'''
_a = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_a = JsonDatasetReader(_lowerCAmelCase, cache_dir=_lowerCAmelCase ).read()
_check_json_dataset(_lowerCAmelCase, _lowerCAmelCase )
def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : int, _lowerCAmelCase : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
for split in splits:
_a = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def A_ ( _lowerCAmelCase : Dict, _lowerCAmelCase : List[Any], _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_a = tmp_path / '''cache'''
_a = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_a = JsonDatasetReader({'''train''': jsonl_path}, cache_dir=_lowerCAmelCase, keep_in_memory=_lowerCAmelCase ).read()
_check_json_datasetdict(_lowerCAmelCase, _lowerCAmelCase )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def A_ ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : str, _lowerCAmelCase : List[str] ):
"""simple docstring"""
_a = tmp_path / '''cache'''
_a = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_a = features.copy() if features else default_expected_features
_a = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_a = JsonDatasetReader({'''train''': jsonl_path}, features=_lowerCAmelCase, cache_dir=_lowerCAmelCase ).read()
_check_json_datasetdict(_lowerCAmelCase, _lowerCAmelCase )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Tuple, _lowerCAmelCase : Any ):
"""simple docstring"""
if split:
_a = {split: jsonl_path}
else:
_a = '''train'''
_a = {'''train''': jsonl_path, '''test''': jsonl_path}
_a = tmp_path / '''cache'''
_a = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_a = JsonDatasetReader(_lowerCAmelCase, cache_dir=_lowerCAmelCase ).read()
_check_json_datasetdict(_lowerCAmelCase, _lowerCAmelCase, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
return json.load(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
return [json.loads(_lowerCAmelCase ) for line in buffer]
class __lowerCamelCase :
'''simple docstring'''
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCAmelCase , __UpperCAmelCase , lines=__UpperCAmelCase ).write()
buffer.seek(0 )
_a = load_json_function(__UpperCAmelCase )
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
assert isinstance(exported_content[0] , __UpperCAmelCase )
assert len(__UpperCAmelCase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCAmelCase , __UpperCAmelCase , lines=__UpperCAmelCase , orient=__UpperCAmelCase ).write()
buffer.seek(0 )
_a = load_json(__UpperCAmelCase )
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__UpperCAmelCase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__UpperCAmelCase ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCAmelCase , __UpperCAmelCase , lines=__UpperCAmelCase , num_proc=2 ).write()
buffer.seek(0 )
_a = load_json_function(__UpperCAmelCase )
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
assert isinstance(exported_content[0] , __UpperCAmelCase )
assert len(__UpperCAmelCase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCAmelCase , __UpperCAmelCase , lines=__UpperCAmelCase , orient=__UpperCAmelCase , num_proc=2 ).write()
buffer.seek(0 )
_a = load_json(__UpperCAmelCase )
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__UpperCAmelCase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__UpperCAmelCase ) == 10
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
with pytest.raises(__UpperCAmelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCAmelCase , __UpperCAmelCase , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
_a = tmp_path_factory.mktemp('''data''' ) / F'test.json.{extension}'
_a = str(shared_datadir / F'test_file.json.{extension}' )
JsonDatasetWriter(__UpperCAmelCase , __UpperCAmelCase , compression=__UpperCAmelCase ).write()
with fsspec.open(__UpperCAmelCase , '''rb''' , compression='''infer''' ) as f:
_a = f.read()
with fsspec.open(__UpperCAmelCase , '''rb''' , compression='''infer''' ) as f:
_a = f.read()
assert exported_content == original_content
| 320 |
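The tests above drive `JsonDatasetReader` and `JsonDatasetWriter`, the internals behind the public `Dataset.to_json` and `load_dataset("json", ...)` entry points. A short round-trip sketch using that public API (file name and column values are illustrative, and the call signatures assume current `datasets` behaviour):

```python
from datasets import Dataset, load_dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [0.5, 1.5]})
ds.to_json("data.jsonl", lines=True)  # one JSON object per line

reloaded = load_dataset("json", data_files={"train": "data.jsonl"}, split="train")
assert sorted(reloaded.column_names) == ["col_1", "col_2", "col_3"]
assert reloaded.num_rows == 2
```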
"""simple docstring"""
def A_ ( _lowerCAmelCase : int = 50 ):
"""simple docstring"""
_a = [1] * (length + 1)
for row_length in range(3, length + 1 ):
for block_length in range(3, row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
| 320 | 1 |
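The row above counts the ways a row of `length` cells can be filled with blocks of length at least three, with any two blocks separated by at least one empty cell (a Project Euler-style counting problem). Re-stating the same recurrence under a readable name makes it easy to sanity-check against the small case of a seven-cell row, which has exactly 17 fillings:

```python
def count_block_fillings(length: int = 50) -> int:
    """Ways to place blocks of length >= 3, separated by >= 1 empty cell, in a row of `length` cells."""
    ways_number = [1] * (length + 1)  # rows shorter than 3 admit only the all-empty filling
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                # First block preceded by `block_start` empty cells and followed by one empty
                # separator cell; the remaining cells may then be filled in any valid way.
                ways_number[row_length] += ways_number[row_length - block_start - block_length - 1]
            ways_number[row_length] += 1  # the first block ends flush with the right edge
    return ways_number[length]


assert count_block_fillings(7) == 17  # verified small case
print(count_block_fillings(50))
```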
"""simple docstring"""
from __future__ import annotations
def A_ ( _lowerCAmelCase : float, _lowerCAmelCase : float, _lowerCAmelCase : float, ):
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 320 |
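The helper above applies the mass-action law n·p = n_i² and solves for whichever of the three carrier concentrations was passed in as zero. A brief restatement with a readable name plus a worked call (the negative-value checks are omitted here for brevity):

```python
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float):
    """Solve n * p = n_i**2 for the single quantity supplied as 0."""
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    if electron_conc == 0:
        return ("electron_conc", intrinsic_conc**2 / hole_conc)
    if hole_conc == 0:
        return ("hole_conc", intrinsic_conc**2 / electron_conc)
    return ("intrinsic_conc", (electron_conc * hole_conc) ** 0.5)


print(carrier_concentration(electron_conc=25, hole_conc=0, intrinsic_conc=10))
# ('hole_conc', 4.0)  because p = n_i**2 / n = 100 / 25
```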
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__snake_case = logging.get_logger('''transformers.models.speecht5''')
__snake_case = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
__snake_case = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
__snake_case = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
__snake_case = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
__snake_case = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
__snake_case = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
__snake_case = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
__snake_case = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
__snake_case = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__snake_case = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__snake_case = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__snake_case = []
__snake_case = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Tuple, _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
for attribute in key.split('''.''' ):
_a = getattr(_lowerCAmelCase, _lowerCAmelCase )
if weight_type is not None:
_a = getattr(_lowerCAmelCase, _lowerCAmelCase ).shape
else:
_a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
elif weight_type == "running_mean":
_a = value
elif weight_type == "running_var":
_a = value
elif weight_type == "num_batches_tracked":
_a = value
else:
_a = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Tuple ):
"""simple docstring"""
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_a , _a = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def A_ ( _lowerCAmelCase : Any, _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : int ):
"""simple docstring"""
_a = []
if task == "s2t":
_a = hf_model.speechta.encoder.prenet.feature_encoder
_a = MAPPING_S2T
_a = IGNORE_KEYS_S2T
elif task == "t2s":
_a = None
_a = MAPPING_T2S
_a = IGNORE_KEYS_T2S
elif task == "s2s":
_a = hf_model.speechta.encoder.prenet.feature_encoder
_a = MAPPING_S2S
_a = IGNORE_KEYS_S2S
else:
raise ValueError(f'Unsupported task: {task}' )
for name, value in fairseq_dict.items():
if should_ignore(_lowerCAmelCase, _lowerCAmelCase ):
logger.info(f'{name} was ignored' )
continue
_a = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, hf_model.config.feat_extract_norm == '''group''', )
_a = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
_a , _a = key.split('''.*.''' )
if prefix in name and suffix in name:
_a = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
_a = True
if "*" in mapped_key:
_a = name.split(_lowerCAmelCase )[0].split('''.''' )[-2]
_a = mapped_key.replace('''*''', _lowerCAmelCase )
if "weight_g" in name:
_a = '''weight_g'''
elif "weight_v" in name:
_a = '''weight_v'''
elif "bias" in name:
_a = '''bias'''
elif "weight" in name:
_a = '''weight'''
elif "running_mean" in name:
_a = '''running_mean'''
elif "running_var" in name:
_a = '''running_var'''
elif "num_batches_tracked" in name:
_a = '''num_batches_tracked'''
else:
_a = None
set_recursively(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Dict, _lowerCAmelCase : List[Any], _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_a = full_name.split('''conv_layers.''' )[-1]
_a = name.split('''.''' )
_a = int(items[0] )
_a = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
_a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
_a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
_a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
_a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def A_ ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Dict, _lowerCAmelCase : List[Any]=None, _lowerCAmelCase : List[str]=None, _lowerCAmelCase : int=None, ):
"""simple docstring"""
if config_path is not None:
_a = SpeechTaConfig.from_pretrained(_lowerCAmelCase )
else:
_a = SpeechTaConfig()
if task == "s2t":
_a = config.max_text_positions
_a = SpeechTaForSpeechToText(_lowerCAmelCase )
elif task == "t2s":
_a = 18_76
_a = 6_00
_a = config.max_speech_positions
_a = SpeechTaForTextToSpeech(_lowerCAmelCase )
elif task == "s2s":
_a = 18_76
_a = config.max_speech_positions
_a = SpeechTaForSpeechToSpeech(_lowerCAmelCase )
else:
raise ValueError(f'Unknown task name: {task}' )
if vocab_path:
_a = SpeechTaTokenizer(_lowerCAmelCase, model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
_a = AddedToken('''<mask>''', lstrip=_lowerCAmelCase, rstrip=_lowerCAmelCase )
_a = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
_a = SpeechTaFeatureExtractor()
_a = SpeechTaProcessor(tokenizer=_lowerCAmelCase, feature_extractor=_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
_a = torch.load(_lowerCAmelCase )
recursively_load_weights(fairseq_checkpoint['''model'''], _lowerCAmelCase, _lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(_lowerCAmelCase )
model.push_to_hub(_lowerCAmelCase )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 320 | 1 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> None:
_a = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
_a = Vector()
def _UpperCAmelCase ( self ) -> None:
_a = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(__UpperCAmelCase ) , '''(0,0,0,0,0,1)''' )
def _UpperCAmelCase ( self ) -> None:
_a = Vector([1, 2, 3, 4] )
self.assertEqual(len(__UpperCAmelCase ) , 4 )
def _UpperCAmelCase ( self ) -> None:
_a = Vector([1, 2] )
_a = Vector([1, 2, 3, 4, 5] )
_a = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
_a = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def _UpperCAmelCase ( self ) -> None:
_a = Vector([1, 2, 3] )
_a = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def _UpperCAmelCase ( self ) -> None:
_a = Vector([1, 2, 3] )
_a = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def _UpperCAmelCase ( self ) -> None:
_a = Vector([1, 2, 3] )
_a = Vector([2, -1, 4] ) # for test of dot product
_a = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' )
self.assertEqual((a * b) , 0 )
def _UpperCAmelCase ( self ) -> None:
self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) , 10 )
def _UpperCAmelCase ( self ) -> None:
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' )
def _UpperCAmelCase ( self ) -> None:
_a = Vector([1, 2, 3] )
_a = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , __UpperCAmelCase , __UpperCAmelCase ) ) , '''(3,4,7)''' )
def _UpperCAmelCase ( self ) -> None:
_a = Vector([1, 0, 0, 0, 0, 0] )
_a = x.copy()
self.assertEqual(str(__UpperCAmelCase ) , str(__UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> None:
_a = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(__UpperCAmelCase ) , '''(0,1,0)''' )
def _UpperCAmelCase ( self ) -> None:
_a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(__UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> None:
_a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_a = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(__UpperCAmelCase , __UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> None:
_a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_a = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(__UpperCAmelCase , __UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> None:
_a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def _UpperCAmelCase ( self ) -> None:
_a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
_a = Vector([1, 2, 3] )
self.assertEqual('''(14,32,50)''' , str(a * x ) )
self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) )
def _UpperCAmelCase ( self ) -> None:
_a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(__UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> None:
_a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def _UpperCAmelCase ( self ) -> None:
_a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_a = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) )
def _UpperCAmelCase ( self ) -> None:
_a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_a = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) )
def _UpperCAmelCase ( self ) -> None:
self.assertEqual(
'''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 320 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[Any] = 'decision_transformer'
A_ : Union[str, Any] = ['past_key_values']
A_ : str = {
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , __UpperCAmelCase=17 , __UpperCAmelCase=4 , __UpperCAmelCase=128 , __UpperCAmelCase=4096 , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=1024 , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=None , __UpperCAmelCase="relu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Optional[int]:
_a = state_dim
_a = act_dim
_a = hidden_size
_a = max_ep_len
_a = action_tanh
_a = vocab_size
_a = n_positions
_a = n_layer
_a = n_head
_a = n_inner
_a = activation_function
_a = resid_pdrop
_a = embd_pdrop
_a = attn_pdrop
_a = layer_norm_epsilon
_a = initializer_range
_a = scale_attn_weights
_a = use_cache
_a = scale_attn_by_inverse_layer_idx
_a = reorder_and_upcast_attn
_a = bos_token_id
_a = eos_token_id
super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
| 320 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class __lowerCamelCase ( a__ , a__ ):
'''simple docstring'''
A_ : List[str] = 'dinat'
A_ : Union[str, Any] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=64 , __UpperCAmelCase=[3, 4, 6, 5] , __UpperCAmelCase=[2, 4, 8, 16] , __UpperCAmelCase=7 , __UpperCAmelCase=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , __UpperCAmelCase=3.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.0 , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> List[Any]:
super().__init__(**__UpperCAmelCase )
_a = patch_size
_a = num_channels
_a = embed_dim
_a = depths
_a = len(__UpperCAmelCase )
_a = num_heads
_a = kernel_size
_a = dilations
_a = mlp_ratio
_a = qkv_bias
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = drop_path_rate
_a = hidden_act
_a = layer_norm_eps
_a = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_a = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
_a = layer_scale_init_value
_a = ['''stem'''] + [F'stage{idx}' for idx in range(1 , len(__UpperCAmelCase ) + 1 )]
_a , _a = get_aligned_output_features_output_indices(
out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
| 320 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__snake_case = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[str] = ['pixel_values']
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> None:
super().__init__(**__UpperCAmelCase )
_a = size if size is not None else {'''shortest_edge''': 224}
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
_a = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' )
_a = do_resize
_a = size
_a = resample
_a = do_center_crop
_a = crop_size
_a = do_rescale
_a = rescale_factor
_a = do_normalize
_a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_a = image_std if image_std is not None else OPENAI_CLIP_STD
_a = do_convert_rgb
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_a = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase )
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
_a = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Optional[Any]:
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image:
_a = do_resize if do_resize is not None else self.do_resize
_a = size if size is not None else self.size
_a = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase )
_a = resample if resample is not None else self.resample
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase )
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_a = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_a = [convert_to_rgb(__UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
_a = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
_a = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
if do_rescale:
_a = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_normalize:
_a = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images]
_a = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
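# Summary of the preprocessing pipeline implemented above: optional RGB
# conversion, then resize -> center_crop -> rescale -> normalize, each step
# gated by its corresponding do_* flag, with the result returned as a
# BatchFeature holding "pixel_values".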
| 320 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 320 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 320 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
__snake_case = logging.get_logger(__name__)
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> None:
warnings.warn(
'''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ChineseCLIPImageProcessor instead.''' , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
| 320 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
__snake_case = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
__snake_case = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def A_ ( ):
"""simple docstring"""
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2''', '''rougeL'''] )
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2'''] )
assert (
pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean()
)
def A_ ( ):
"""simple docstring"""
_a = '''rougeLsum'''
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k]
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k]
assert score > score_no_sep
def A_ ( ):
"""simple docstring"""
_a = ['''rouge1''', '''rouge2''', '''rougeL''']
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase )
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase )
assert score_sep == score_no_sep
def A_ ( ):
"""simple docstring"""
_a = [
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
]
_a = [
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
assert calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase ) == calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase )
def A_ ( ):
"""simple docstring"""
_a = [
'''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
]
_a = [
''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''], newline_sep=_lowerCAmelCase )['''rougeLsum''']
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''] )['''rougeLsum''']
assert new_score > prev_score
def A_ ( ):
"""simple docstring"""
_a = Path('''examples/seq2seq/test_data/wmt_en_ro''' )
_a = calculate_rouge_path(data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ) )
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
_a = calculate_rouge_path(
data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ), bootstrap_aggregation=_lowerCAmelCase )
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
| 320 | 1 |
"""simple docstring"""
class __lowerCamelCase : # Public class to implement a graph
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> None:
_a = row
_a = col
_a = graph
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> bool:
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> None:
# Checking all 8 elements surrounding nth element
_a = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
_a = [-1, 0, 1, -1, 1, -1, 0, 1]
_a = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , __UpperCAmelCase ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> int: # And finally, count all islands.
_a = [[False for j in range(self.COL )] for i in range(self.ROW )]
_a = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
count += 1
return count
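# Usage sketch (illustrative; assumes the DFS helpers keep their original names
# such as `is_safe`/`diffs` so the internal calls resolve): construct the class
# with the row count, column count and a 0/1 grid, then call the island counter.
# For [[1, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]] it reports 2 islands, since
# diagonally adjacent 1s are merged into the same island.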
| 320 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
__snake_case = logging.get_logger(__name__)
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> None:
warnings.warn(
'''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ChineseCLIPImageProcessor instead.''' , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
| 320 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__snake_case = logging.get_logger(__name__)
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> None:
warnings.warn(
'''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DeiTImageProcessor instead.''' , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
| 320 |
"""simple docstring"""
from __future__ import annotations
def A_ ( _lowerCAmelCase : float, _lowerCAmelCase : float, _lowerCAmelCase : float, ):
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
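# Illustrative example: with stress passed as 0, tangential_force = 100 and
# area = 2, the function returns ("stress", 50.0); passing area as 0 instead
# returns ("area", tangential_force / stress). Exactly one of the three
# arguments must be 0, otherwise a ValueError is raised.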
if __name__ == "__main__":
import doctest
doctest.testmod()
| 320 | 1 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__snake_case = '''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__snake_case = concatenate_datasets
__snake_case = DownloadConfig
__snake_case = DownloadManager
__snake_case = DownloadMode
__snake_case = DownloadConfig
__snake_case = DownloadMode
__snake_case = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 320 |
"""simple docstring"""
def A_ ( ):
"""simple docstring"""
_a = []
_a = 1
while len(_lowerCAmelCase ) < 1e6:
constant.append(str(_lowerCAmelCase ) )
i += 1
_a = ''''''.join(_lowerCAmelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[9_99] )
* int(constant[99_99] )
* int(constant[9_99_99] )
* int(constant[99_99_99] )
)
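# The string built above is the fractional part of the Champernowne constant
# 0.123456789101112...; the product of the digits at positions 1, 10, 100, ...,
# 1_000_000 (Project Euler problem 40) works out to 210.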
if __name__ == "__main__":
print(solution())
| 320 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__snake_case = logging.get_logger(__name__)
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : Any = ['pixel_values']
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> None:
super().__init__(**__UpperCAmelCase )
_a = size if size is not None else {'''shortest_edge''': 224}
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
_a = crop_size if crop_size is not None else {'''height''': 256, '''width''': 256}
_a = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' )
_a = do_resize
_a = size
_a = resample
_a = do_rescale
_a = rescale_factor
_a = do_center_crop
_a = crop_size
_a = do_flip_channel_order
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PIL.Image.BILINEAR , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}' )
_a = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase )
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
_a = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> int:
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> np.ndarray:
return flip_channel_order(__UpperCAmelCase , data_format=__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image:
_a = do_resize if do_resize is not None else self.do_resize
_a = resample if resample is not None else self.resample
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
_a = size if size is not None else self.size
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' )
_a = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
# All transformations expect numpy arrays.
_a = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
_a = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
if do_rescale:
_a = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
_a = [self.flip_channel_order(image=__UpperCAmelCase ) for image in images]
_a = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Any:
_a = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(__UpperCAmelCase ):
_a = target_sizes.numpy()
_a = []
for idx in range(len(__UpperCAmelCase ) ):
_a = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=__UpperCAmelCase )
_a = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__UpperCAmelCase )
else:
_a = logits.argmax(dim=1 )
_a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
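# The post-processing method above upsamples the logits to each requested
# target size with bilinear interpolation and takes the per-pixel argmax,
# yielding one semantic segmentation map per image in the batch.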
| 320 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[str] = 'bart'
A_ : Optional[Any] = ['past_key_values']
A_ : Union[str, Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , __UpperCAmelCase=50265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ) -> Tuple:
_a = vocab_size
_a = max_position_embeddings
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = encoder_layerdrop
_a = decoder_layerdrop
_a = classifier_dropout
_a = use_cache
_a = encoder_layers
_a = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ):
_a = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'''The config can simply be saved and uploaded again to be fixed.''' )
class __lowerCamelCase ( a__ ):
'''simple docstring'''
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_a = {0: '''batch'''}
_a = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
_a = {0: '''batch''', 1: '''decoder_sequence'''}
_a = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_a , _a = self.num_layers
for i in range(__UpperCAmelCase ):
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
_a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_a = super().outputs
else:
_a = super(__UpperCAmelCase , self ).outputs
if self.use_past:
_a , _a = self.num_layers
for i in range(__UpperCAmelCase ):
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Generate decoder inputs
_a = seq_length if not self.use_past else 1
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_a = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
_a = dict(**__UpperCAmelCase , **__UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_a , _a = common_inputs['''input_ids'''].shape
_a = common_inputs['''decoder_input_ids'''].shape[1]
_a , _a = self.num_attention_heads
_a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a = decoder_seq_length + 3
_a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_a = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 )
_a = []
            # If the numbers of encoder and decoder layers are present in the model configuration, both are considered
_a , _a = self.num_layers
_a = min(__UpperCAmelCase , __UpperCAmelCase )
_a = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers
_a = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(__UpperCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
) )
# TODO: test this.
_a = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(__UpperCAmelCase , __UpperCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) )
return common_inputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_a , _a = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_a = seqlen + 2
_a , _a = self.num_layers
_a , _a = self.num_attention_heads
_a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a = common_inputs['''attention_mask'''].dtype
_a = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
_a = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase )
]
return common_inputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_a = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_a = tokenizer.num_special_tokens_to_add(__UpperCAmelCase )
_a = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase )
        # Generate dummy inputs according to the computed batch and sequence lengths
_a = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
_a = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) )
return common_inputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
elif self.task == "causal-lm":
_a = self._generate_dummy_inputs_for_causal_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
else:
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
return common_inputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
if self.task in ["default", "seq2seq-lm"]:
_a = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
_a = super(__UpperCAmelCase , self )._flatten_past_key_values_(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
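# Taken together, the ONNX config above builds tokenizer-based dummy encoder and
# decoder inputs and, when use_past is enabled, appends zero-filled
# past_key_values tensors of shape (batch, num_heads, sequence, head_dim) so the
# model can be traced for export.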
| 320 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=a__ )
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : str = field(default='automatic-speech-recognition' , metadata={'include_in_asdict_even_if_is_default': True} )
A_ : ClassVar[Features] = Features({'audio': Audio()} )
A_ : ClassVar[Features] = Features({'transcription': Value('string' )} )
A_ : str = "audio"
A_ : str = "transcription"
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Any:
if self.audio_column not in features:
raise ValueError(F'Column {self.audio_column} is not present in features.' )
if not isinstance(features[self.audio_column] , __UpperCAmelCase ):
raise ValueError(F'Column {self.audio_column} is not an Audio type.' )
_a = copy.deepcopy(self )
_a = self.input_schema.copy()
_a = features[self.audio_column]
_a = input_schema
return task_template
@property
def _UpperCAmelCase ( self ) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"}
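# Usage sketch (illustrative): applied to a dataset whose features include an
# Audio column and a string transcription column, the template above checks the
# column types and exposes them under the standard "audio"/"transcription"
# schema used for automatic-speech-recognition training.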
| 320 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
for char in word:
_a = ord(_lowerCAmelCase )
if not _is_chinese_char(_lowerCAmelCase ):
return 0
return 1
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_a = set()
for token in tokens:
_a = len(_lowerCAmelCase ) > 1 and is_chinese(_lowerCAmelCase )
if chinese_word:
word_set.add(_lowerCAmelCase )
_a = list(_lowerCAmelCase )
return word_list
def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : set() ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
_a = max([len(_lowerCAmelCase ) for w in chinese_word_set] )
_a = bert_tokens
_a , _a = 0, len(_lowerCAmelCase )
while start < end:
_a = True
if is_chinese(bert_word[start] ):
_a = min(end - start, _lowerCAmelCase )
for i in range(_lowerCAmelCase, 1, -1 ):
_a = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
_a = '''##''' + bert_word[j]
_a = start + i
_a = False
break
if single_word:
start += 1
return bert_word
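# Worked example (illustrative): with bert tokens ["中", "国", "人"] and a
# chinese_word_set containing "中国", the second character becomes "##国" so the
# whole word "中国" can be masked as one unit, while "人" is left unchanged.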
def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : LTP, _lowerCAmelCase : BertTokenizer ):
"""simple docstring"""
_a = []
for i in range(0, len(_lowerCAmelCase ), 1_00 ):
_a = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=['''cws'''] ).cws
_a = [get_chinese_word(_lowerCAmelCase ) for r in res]
ltp_res.extend(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
_a = []
for i in range(0, len(_lowerCAmelCase ), 1_00 ):
_a = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=_lowerCAmelCase, truncation=_lowerCAmelCase, max_length=5_12 )
bert_res.extend(res['''input_ids'''] )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
_a = []
for input_ids, chinese_word in zip(_lowerCAmelCase, _lowerCAmelCase ):
_a = []
for id in input_ids:
_a = bert_tokenizer._convert_id_to_token(_lowerCAmelCase )
input_tokens.append(_lowerCAmelCase )
_a = add_sub_symbol(_lowerCAmelCase, _lowerCAmelCase )
_a = []
        # We only save the positions of Chinese subwords starting with ##, which means they are part of a whole word.
for i, token in enumerate(_lowerCAmelCase ):
if token[:2] == "##":
_a = token[2:]
# save chinese tokens' pos
if len(_lowerCAmelCase ) == 1 and _is_chinese_char(ord(_lowerCAmelCase ) ):
ref_id.append(_lowerCAmelCase )
ref_ids.append(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
return ref_ids
def A_ ( _lowerCAmelCase : Any ):
"""simple docstring"""
with open(args.file_name, '''r''', encoding='''utf-8''' ) as f:
_a = f.readlines()
_a = [line.strip() for line in data if len(_lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    _a = LTP(args.ltp ) # faster on a GPU device
_a = BertTokenizer.from_pretrained(args.bert )
_a = prepare_ref(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
with open(args.save_path, '''w''', encoding='''utf-8''' ) as f:
_a = [json.dumps(_lowerCAmelCase ) + '''\n''' for ref in ref_ids]
f.writelines(_lowerCAmelCase )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
__snake_case = parser.parse_args()
main(args)
| 320 | 1 |
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = AlbertTokenizer
A_ : Any = AlbertTokenizerFast
A_ : Optional[int] = True
A_ : List[str] = True
A_ : Any = True
def _UpperCAmelCase ( self ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
_a = AlbertTokenizer(__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> int:
_a = '''this is a test'''
_a = '''this is a test'''
return input_text, output_text
def _UpperCAmelCase ( self ) -> str:
_a = '''<pad>'''
_a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''▁eloquent''' )
self.assertEqual(len(__UpperCAmelCase ) , 30000 )
def _UpperCAmelCase ( self ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
_a = self.get_tokenizer()
_a = self.get_rust_tokenizer()
_a = '''I was born in 92000, and this is falsé.'''
_a = tokenizer.tokenize(__UpperCAmelCase )
_a = rust_tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_a = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
_a = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_a = self.get_rust_tokenizer()
_a = tokenizer.encode(__UpperCAmelCase )
_a = rust_tokenizer.encode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Dict:
_a = AlbertTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
_a = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__UpperCAmelCase , ['''▁this''', '''▁is''', '''▁a''', '''▁test'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [48, 25, 21, 1289] )
_a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__UpperCAmelCase , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''] )
_a = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
_a = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''] , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = AlbertTokenizer(__UpperCAmelCase )
_a = tokenizer.encode('''sequence builders''' )
_a = tokenizer.encode('''multi-sequence build''' )
_a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
_a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
# fmt: off
_a = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''albert-base-v2''' , revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''' , )
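# A tiny illustration (with made-up token ids, not taken from the tokenizer above) of the
# single- vs. pair-sequence layouts asserted in the test: [CLS] A [SEP] for one sequence
# and [CLS] A [SEP] B [SEP] for a pair.
cls_id, sep_id = 2, 3
text, text_2 = [7, 8, 9], [10, 11]
print([cls_id] + text + [sep_id])
print([cls_id] + text + [sep_id] + text_2 + [sep_id])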
| 320 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[Any] = 'gptj'
A_ : Optional[int] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , __UpperCAmelCase=50400 , __UpperCAmelCase=2048 , __UpperCAmelCase=4096 , __UpperCAmelCase=28 , __UpperCAmelCase=16 , __UpperCAmelCase=64 , __UpperCAmelCase=None , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Union[str, Any]:
_a = vocab_size
_a = n_positions
_a = n_embd
_a = n_layer
_a = n_head
_a = n_inner
_a = rotary_dim
_a = activation_function
_a = resid_pdrop
_a = embd_pdrop
_a = attn_pdrop
_a = layer_norm_epsilon
_a = initializer_range
_a = use_cache
_a = bos_token_id
_a = eos_token_id
super().__init__(
bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase )
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = "default" , __UpperCAmelCase = None , __UpperCAmelCase = False , ) -> Optional[Any]:
super().__init__(__UpperCAmelCase , task=__UpperCAmelCase , patching_specs=__UpperCAmelCase , use_past=__UpperCAmelCase )
if not getattr(self._config , '''pad_token_id''' , __UpperCAmelCase ):
# TODO: how to do that better?
_a = 0
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
_a = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' )
_a = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
_a = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def _UpperCAmelCase ( self ) -> int:
return self._config.n_layer
@property
def _UpperCAmelCase ( self ) -> int:
return self._config.n_head
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
_a = super(__UpperCAmelCase , self ).generate_dummy_inputs(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
# We need to order the inputs in the way they appear in the forward()
_a = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_a , _a = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_a = seqlen + 2
_a = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_a = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(self.num_layers )
]
_a = common_inputs['''attention_mask''']
if self.use_past:
_a = ordered_inputs['''attention_mask'''].dtype
_a = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def _UpperCAmelCase ( self ) -> int:
return 13
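# A minimal sketch (plain PyTorch, hypothetical sizes loosely matching the GPT-J defaults
# above) of the dummy past_key_values the ONNX config builds: one (key, value) pair per
# layer, each of shape (batch, num_heads, past_length, head_dim).
import torch

batch, num_heads, past_length, head_dim = 2, 16, 10, 4096 // 16
past_shape = (batch, num_heads, past_length, head_dim)
past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(28)]
print(len(past_key_values), past_key_values[0][0].shape)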
| 320 | 1 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
__snake_case = get_logger(__name__)
__snake_case = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class __lowerCamelCase :
'''simple docstring'''
@add_start_docstrings(__UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase ) -> jnp.ndarray:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class __lowerCamelCase :
'''simple docstring'''
@add_start_docstrings(__UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase ) -> jnp.ndarray:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class __lowerCamelCase ( a__ ):
'''simple docstring'''
@add_start_docstrings(__UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> jnp.ndarray:
for processor in self:
_a = inspect.signature(processor.__call__ ).parameters
if len(__UpperCAmelCase ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'Make sure that all the required parameters: {list(function_args.keys() )} for '
F'{processor.__class__} are passed to the logits processor.' )
_a = processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
else:
_a = processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return scores
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase ) -> str:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not (temperature > 0):
raise ValueError(F'`temperature` has to be a strictly positive float, but is {temperature}' )
_a = temperature
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> jnp.ndarray:
_a = scores / self.temperature
return scores
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = -float('''Inf''' ) , __UpperCAmelCase = 1 ) -> Union[str, Any]:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'`top_p` has to be a float > 0 and < 1, but is {top_p}' )
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or (min_tokens_to_keep < 1):
raise ValueError(F'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}' )
_a = top_p
_a = filter_value
_a = min_tokens_to_keep
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> jnp.ndarray:
_a , _a = lax.top_k(__UpperCAmelCase , scores.shape[-1] )
_a = jnp.full_like(__UpperCAmelCase , self.filter_value )
_a = jax.nn.softmax(__UpperCAmelCase , axis=-1 ).cumsum(axis=-1 )
_a = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
_a = jnp.roll(__UpperCAmelCase , 1 )
score_mask |= score_mask.at[:, 0].set(__UpperCAmelCase )
# min tokens to keep
_a = score_mask.at[:, : self.min_tokens_to_keep].set(__UpperCAmelCase )
_a = jnp.where(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_a = jax.lax.sort_key_val(__UpperCAmelCase , __UpperCAmelCase )[-1]
return next_scores
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = -float('''Inf''' ) , __UpperCAmelCase = 1 ) -> int:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or top_k <= 0:
raise ValueError(F'`top_k` has to be a strictly positive integer, but is {top_k}' )
_a = max(__UpperCAmelCase , __UpperCAmelCase )
_a = filter_value
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> jnp.ndarray:
_a , _a = scores.shape
_a = jnp.full(batch_size * vocab_size , self.filter_value )
_a = min(self.top_k , scores.shape[-1] ) # Safety check
_a , _a = lax.top_k(__UpperCAmelCase , __UpperCAmelCase )
_a = jnp.broadcast_to((jnp.arange(__UpperCAmelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
_a = topk_scores.flatten()
_a = topk_indices.flatten() + shift
_a = next_scores_flat.at[topk_indices_flat].set(__UpperCAmelCase )
_a = next_scores_flat.reshape(__UpperCAmelCase , __UpperCAmelCase )
return next_scores
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase ) -> str:
_a = bos_token_id
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> jnp.ndarray:
_a = jnp.full(scores.shape , -float('''inf''' ) )
_a = 1 - jnp.bool_(cur_len - 1 )
_a = jnp.where(__UpperCAmelCase , new_scores.at[:, self.bos_token_id].set(0 ) , __UpperCAmelCase )
return scores
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> str:
_a = max_length
_a = eos_token_id
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> jnp.ndarray:
_a = jnp.full(scores.shape , -float('''inf''' ) )
_a = 1 - jnp.bool_(cur_len - self.max_length + 1 )
_a = jnp.where(__UpperCAmelCase , new_scores.at[:, self.eos_token_id].set(0 ) , __UpperCAmelCase )
return scores
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or min_length < 0:
raise ValueError(F'`min_length` has to be a positive integer, but is {min_length}' )
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or eos_token_id < 0:
raise ValueError(F'`eos_token_id` has to be a positive integer, but is {eos_token_id}' )
_a = min_length
_a = eos_token_id
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> jnp.ndarray:
# create boolean flag to decide if min length penalty should be applied
_a = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
_a = jnp.where(__UpperCAmelCase , scores.at[:, self.eos_token_id].set(-float('''inf''' ) ) , __UpperCAmelCase )
return scores
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> str:
_a = list(__UpperCAmelCase )
_a = begin_index
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
_a = 1 - jnp.bool_(cur_len - self.begin_index )
_a = jnp.where(__UpperCAmelCase , scores.at[:, self.begin_suppress_tokens].set(-float('''inf''' ) ) , __UpperCAmelCase )
return scores
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase ) -> Tuple:
_a = list(__UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> jnp.ndarray:
_a = scores.at[..., self.suppress_tokens].set(-float('''inf''' ) )
return scores
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase ) -> List[str]:
_a = dict(__UpperCAmelCase )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
_a = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
_a = force_token_array.at[index].set(__UpperCAmelCase )
_a = jnp.intaa(__UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> jnp.ndarray:
def _force_token(__UpperCAmelCase ):
_a = scores.shape[0]
_a = self.force_token_array[generation_idx]
_a = jnp.ones_like(__UpperCAmelCase , dtype=scores.dtype ) * -float('''inf''' )
_a = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
_a = lax.dynamic_update_slice(__UpperCAmelCase , __UpperCAmelCase , (0, current_token) )
return new_scores
_a = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(__UpperCAmelCase ) , lambda: scores , ) , )
return scores
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
_a = generate_config.eos_token_id
_a = generate_config.no_timestamps_token_id
_a = generate_config.no_timestamps_token_id + 1
_a = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(__UpperCAmelCase , '''max_initial_timestamp_index''' ):
_a = generate_config.max_initial_timestamp_index
else:
_a = model_config.vocab_size
if self.max_initial_timestamp_index is None:
_a = model_config.vocab_size
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
# suppress <|notimestamps|> which is handled by without_timestamps
_a = scores.at[:, self.no_timestamps_token_id].set(-float('''inf''' ) )
def handle_pairs(__UpperCAmelCase , __UpperCAmelCase ):
_a = jnp.where((cur_len - self.begin_index) >= 1 , __UpperCAmelCase , __UpperCAmelCase )
_a = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , __UpperCAmelCase , )
_a = jnp.where((cur_len - self.begin_index) < 2 , __UpperCAmelCase , __UpperCAmelCase )
_a = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , __UpperCAmelCase , __UpperCAmelCase , )
return jnp.where(
__UpperCAmelCase , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('''inf''' ) ) , scores_k.at[: self.eos_token_id].set(-float('''inf''' ) ) , ) , __UpperCAmelCase , )
_a = jax.vmap(__UpperCAmelCase )(__UpperCAmelCase , __UpperCAmelCase )
_a = jnp.where(cur_len == self.begin_index , __UpperCAmelCase , __UpperCAmelCase )
_a = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , __UpperCAmelCase , )
_a = self.timestamp_begin + self.max_initial_timestamp_index
_a = jnp.where(
__UpperCAmelCase , scores.at[:, last_allowed + 1 :].set(-float('''inf''' ) ) , __UpperCAmelCase , )
# if sum of probability over timestamps is above any other token, sample timestamp
_a = jax.nn.log_softmax(__UpperCAmelCase , axis=-1 )
def handle_cumulative_probs(__UpperCAmelCase , __UpperCAmelCase ):
_a = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
_a = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('''inf''' ) ) , __UpperCAmelCase , )
_a = jax.vmap(__UpperCAmelCase )(__UpperCAmelCase , __UpperCAmelCase )
return scores
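# Standalone sketch (not the library classes above, whose names are obfuscated here) of
# the top-k filtering idea the processors implement: keep the k best logits per row and
# push everything else down to the filter value. The scores below are made up.
import jax.numpy as jnp
from jax import lax

def sketch_top_k_filter(scores, k, filter_value=-float("inf")):
    # scores: (batch, vocab) -> same shape, with all but the top-k entries filtered out
    topk_scores, topk_indices = lax.top_k(scores, k)
    filtered = jnp.full_like(scores, filter_value)
    batch_idx = jnp.arange(scores.shape[0])[:, None]
    return filtered.at[batch_idx, topk_indices].set(topk_scores)

print(sketch_top_k_filter(jnp.array([[0.1, 2.0, -1.0, 3.0, 0.5]]), k=2))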
| 320 |
"""simple docstring"""
import os
import sys
import unittest
__snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__snake_case = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
__snake_case = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> str:
_a = get_test_to_tester_mapping(__UpperCAmelCase )
_a = get_test_to_tester_mapping(__UpperCAmelCase )
_a = {'''BertModelTest''': '''BertModelTester'''}
_a = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = get_model_to_test_mapping(__UpperCAmelCase )
_a = get_model_to_test_mapping(__UpperCAmelCase )
_a = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
_a = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = get_model_to_tester_mapping(__UpperCAmelCase )
_a = get_model_to_tester_mapping(__UpperCAmelCase )
_a = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
_a = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
| 320 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__snake_case = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''BartphoTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 320 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __lowerCamelCase :
'''simple docstring'''
@staticmethod
def _UpperCAmelCase ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
pass
def A_ ( _lowerCAmelCase : Image ):
"""simple docstring"""
_a = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def A_ ( _lowerCAmelCase : Image ):
"""simple docstring"""
_a = np.array(_lowerCAmelCase )
_a = npimg.shape
return {"hash": hashimage(_lowerCAmelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
A_ : Any = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
A_ : str = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
_a = MaskGenerationPipeline(model=__UpperCAmelCase , image_processor=__UpperCAmelCase )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int:
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def _UpperCAmelCase ( self ) -> List[str]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> int:
_a = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
_a = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 )
# Shortening by hashing
_a = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8871}
] , )
# fmt: on
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Any:
_a = '''facebook/sam-vit-huge'''
_a = pipeline('''mask-generation''' , model=__UpperCAmelCase )
_a = image_segmenter(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
_a = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053},
] , )
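# Standalone sketch of the "shortening by hashing" trick used in the tests above:
# fingerprint a mask array by hashing its raw bytes and keeping ten hex characters.
import hashlib
import numpy as np

def short_hash(arr: np.ndarray) -> str:
    return hashlib.md5(arr.tobytes()).hexdigest()[:10]

print(short_hash(np.zeros((480, 640), dtype=np.uint8)))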
| 320 | 1 |
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__snake_case = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
__snake_case = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
__snake_case = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 , __UpperCAmelCase = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__UpperCAmelCase , hypotheses=__UpperCAmelCase , min_len=__UpperCAmelCase , max_len=__UpperCAmelCase )
}
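# Quick usage sketch of the underlying nltk call that compute() above wraps; the
# tokenized sentences here are made up for illustration.
from nltk.translate import gleu_score

hypotheses = ["the cat sat on the mat".split()]
list_of_references = [["the cat is on the mat".split()]]
print(gleu_score.corpus_gleu(list_of_references=list_of_references, hypotheses=hypotheses, min_len=1, max_len=4))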
| 320 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=9 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=8 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.002 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]:
_a = parent
_a = batch_size
_a = encoder_seq_length
_a = decoder_seq_length
# For common tests
_a = self.decoder_seq_length
_a = is_training
_a = use_attention_mask
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = d_ff
_a = relative_attention_num_buckets
_a = dropout_rate
_a = initializer_factor
_a = eos_token_id
_a = pad_token_id
_a = decoder_start_token_id
_a = None
_a = decoder_layers
def _UpperCAmelCase ( self ) -> Dict:
return TaConfig.from_pretrained('''google/umt5-base''' )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]:
if attention_mask is None:
_a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCAmelCase )
if decoder_head_mask is None:
_a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase )
if cross_attn_head_mask is None:
_a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _UpperCAmelCase ( self ) -> Tuple:
_a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
_a = input_ids.clamp(self.pad_token_id + 1 )
_a = decoder_input_ids.clamp(self.pad_token_id + 1 )
_a = self.get_config()
_a = config.num_attention_heads
_a = self.prepare_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, input_dict
def _UpperCAmelCase ( self ) -> int:
_a , _a = self.prepare_config_and_inputs()
return config, inputs_dict
def _UpperCAmelCase ( self ) -> Tuple:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _UpperCAmelCase ( self ) -> List[str]:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Dict:
_a = UMTaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(
input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , )
_a = model(input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase )
_a = result.last_hidden_state
_a = result.past_key_values
_a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__UpperCAmelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[Any]:
_a = UMTaModel(config=__UpperCAmelCase ).get_decoder().to(__UpperCAmelCase ).eval()
# first forward pass
_a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
_a = model(__UpperCAmelCase )
_a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 )
_a , _a = outputs.to_tuple()
# create hypothetical next token and extend next_input_ids
_a = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append the new tokens to input_ids
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = model(__UpperCAmelCase )['''last_hidden_state''']
_a = model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )['''last_hidden_state''']
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -1, random_slice_idx].detach()
_a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , ) -> Union[str, Any]:
_a = UMTaModel(config=__UpperCAmelCase ).to(__UpperCAmelCase ).half().eval()
_a = model(**__UpperCAmelCase )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(__UpperCAmelCase ).any().item() )
@require_torch
class __lowerCamelCase ( a__ , a__ , a__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
A_ : Optional[Any] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
A_ : int = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
A_ : str = True
A_ : List[str] = False
A_ : List[Any] = False
A_ : str = True
A_ : List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
A_ : Optional[Any] = [0.8, 0.9]
def _UpperCAmelCase ( self ) -> Tuple:
_a = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def _UpperCAmelCase ( self ) -> int:
_a = self.model_tester.prepare_config_and_inputs()
_a = UMTaModel(config_and_inputs[0] ).to(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=__UpperCAmelCase , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
_a = self.model_tester.prepare_config_and_inputs()
_a = config_and_inputs[0]
_a = UMTaForConditionalGeneration(__UpperCAmelCase ).eval()
model.to(__UpperCAmelCase )
_a = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=__UpperCAmelCase ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ),
}
for attn_name, (name, mask) in zip(__UpperCAmelCase , head_masking.items() ):
_a = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_a = torch.ones(
config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase )
_a = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=__UpperCAmelCase , return_dict_in_generate=__UpperCAmelCase , **__UpperCAmelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def _UpperCAmelCase ( self ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=__UpperCAmelCase ).to(__UpperCAmelCase )
_a = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=__UpperCAmelCase , legacy=__UpperCAmelCase )
_a = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
_a = tokenizer(__UpperCAmelCase , return_tensors='''pt''' , padding=__UpperCAmelCase ).input_ids
# fmt: off
_a = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__UpperCAmelCase , __UpperCAmelCase )
_a = model.generate(input_ids.to(__UpperCAmelCase ) )
_a = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
_a = tokenizer.batch_decode(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
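# Small illustration of the clamping trick described in the comments of the model tester
# above: shifting every id to at least pad_token_id + 1 guarantees that no pad tokens
# appear inside the dummy sequences. The values below are made up.
import torch

pad_token_id = 0
ids = torch.tensor([[0, 5, 2], [7, 0, 3]])
print(ids.clamp(pad_token_id + 1))  # every entry is now >= 1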
| 320 | 1 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCamelCase :
'''simple docstring'''
@staticmethod
def _UpperCAmelCase ( *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
pass
@is_pipeline_test
@require_torch
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
A_ : Union[str, Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
_a = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
_a = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
_a = vqa_pipeline(__UpperCAmelCase , top_k=1 )
self.assertEqual(
__UpperCAmelCase , [
[{'''score''': ANY(__UpperCAmelCase ), '''answer''': ANY(__UpperCAmelCase )}],
[{'''score''': ANY(__UpperCAmelCase ), '''answer''': ANY(__UpperCAmelCase )}],
] , )
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
_a = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
_a = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_a = '''How many cats are there?'''
_a = vqa_pipeline(image=__UpperCAmelCase , question='''How many cats are there?''' , top_k=2 )
self.assertEqual(
__UpperCAmelCase , [{'''score''': ANY(__UpperCAmelCase ), '''answer''': ANY(__UpperCAmelCase )}, {'''score''': ANY(__UpperCAmelCase ), '''answer''': ANY(__UpperCAmelCase )}] )
_a = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
__UpperCAmelCase , [{'''score''': ANY(__UpperCAmelCase ), '''answer''': ANY(__UpperCAmelCase )}, {'''score''': ANY(__UpperCAmelCase ), '''answer''': ANY(__UpperCAmelCase )}] )
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Any:
_a = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' )
_a = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_a = '''How many cats are there?'''
_a = vqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
_a = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
_a = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [[{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
def _UpperCAmelCase ( self ) -> int:
pass
| 320 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class __lowerCamelCase :
'''simple docstring'''
def __init__( self ) -> Tuple:
_a = {}
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> int:
if self.graph.get(__UpperCAmelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_a = [[w, v]]
if not self.graph.get(__UpperCAmelCase ):
_a = []
def _UpperCAmelCase ( self ) -> int:
return list(self.graph )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]:
if s == d:
return []
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
# check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple:
if c == -1:
_a = floor(random() * 10000 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_a = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[str]:
_a = deque()
_a = []
if s == -2:
_a = list(self.graph )[0]
d.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
while d:
_a = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Tuple:
_a = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict:
return len(self.graph[u] )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple:
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
_a = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
# check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return sorted_nodes
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Any:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]:
_a = time()
self.dfs(__UpperCAmelCase , __UpperCAmelCase )
_a = time()
return end - begin
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Optional[Any]:
_a = time()
self.bfs(__UpperCAmelCase )
_a = time()
return end - begin
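# Undirected weighted graph: add_pair records every edge in both adjacency lists.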
class __lowerCamelCase :
'''simple docstring'''
def __init__( self ) -> Optional[int]:
_a = {}
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> Dict:
        # check if u already exists in the adjacency map
if self.graph.get(__UpperCAmelCase ):
            # only add the edge if it is not already present
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_a = [[w, v]]
# add the other way
if self.graph.get(__UpperCAmelCase ):
            # only add the edge if it is not already present
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
_a = [[w, u]]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCAmelCase )
# the other way round
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Dict:
if s == d:
return []
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple:
if c == -1:
_a = floor(random() * 10000 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_a = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[Any]:
_a = deque()
_a = []
if s == -2:
_a = list(self.graph )[0]
d.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
while d:
_a = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict:
return len(self.graph[u] )
def _UpperCAmelCase ( self ) -> int:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return list(self.graph )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Tuple:
_a = time()
self.dfs(__UpperCAmelCase , __UpperCAmelCase )
_a = time()
return end - begin
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple:
_a = time()
self.bfs(__UpperCAmelCase )
_a = time()
return end - begin
| 320 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _a ( ) -> Any:
a = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=a , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=a , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=a )
return parser.parse_args()
def _a ( ) -> Tuple:
a = parse_args()
# Import training_script as a module.
a = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
a = script_fpath.stem
a = importlib.import_module(a )
# Patch sys.argv
a = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : Dict = 'unispeech'
def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase=320 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=100 , __UpperCAmelCase=256 , __UpperCAmelCase=256 , __UpperCAmelCase=0.1 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=80 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=0.5 , **__UpperCAmelCase , ) -> Union[str, Any]:
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
_a = hidden_size
_a = feat_extract_norm
_a = feat_extract_activation
_a = list(__UpperCAmelCase )
_a = list(__UpperCAmelCase )
_a = list(__UpperCAmelCase )
_a = conv_bias
_a = num_conv_pos_embeddings
_a = num_conv_pos_embedding_groups
_a = len(self.conv_dim )
_a = num_hidden_layers
_a = intermediate_size
_a = hidden_act
_a = num_attention_heads
_a = hidden_dropout
_a = attention_dropout
_a = activation_dropout
_a = feat_proj_dropout
_a = final_dropout
_a = layerdrop
_a = layer_norm_eps
_a = initializer_range
_a = num_ctc_classes
_a = vocab_size
_a = do_stable_layer_norm
_a = use_weighted_layer_sum
_a = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a = apply_spec_augment
_a = mask_time_prob
_a = mask_time_length
_a = mask_time_min_masks
_a = mask_feature_prob
_a = mask_feature_length
_a = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_a = num_codevectors_per_group
_a = num_codevector_groups
_a = contrastive_logits_temperature
_a = feat_quantizer_dropout
_a = num_negatives
_a = codevector_dim
_a = proj_codevector_dim
_a = diversity_loss_weight
# ctc loss
_a = ctc_loss_reduction
_a = ctc_zero_infinity
# pretraining loss
_a = replace_prob
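    # total stride of the convolutional feature encoder (product of all conv strides)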
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 320 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Optional[Any] ={'vocab_file': 'vocab.json'}
SCREAMING_SNAKE_CASE_: int ={
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
SCREAMING_SNAKE_CASE_: Union[str, Any] ={'mgp-str': 27}
class __A ( UpperCamelCase__ ):
a__ : str = VOCAB_FILES_NAMES
a__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
a__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self : str , __a : Tuple , __a : str="[GO]" , __a : int="[GO]" , __a : Tuple="[s]" , __a : List[Any]="[GO]" , **__a : Any ):
super().__init__(
unk_token=__a , bos_token=__a , eos_token=__a , pad_token=__a , **__a , )
with open(__a , encoding="utf-8" ) as vocab_handle:
UpperCAmelCase_ = json.load(__a )
UpperCAmelCase_ = {v: k for k, v in self.vocab.items()}
@property
def _lowercase (self : List[Any] ):
return len(self.vocab )
def _lowercase (self : Dict ):
return dict(self.vocab , **self.added_tokens_encoder )
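    # character-level tokenization: every character of the input string becomes its own token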
def _lowercase (self : Optional[int] , __a : Optional[Any] ):
UpperCAmelCase_ = []
        for s in __a:
            char_tokens.extend(s )
return char_tokens
def _lowercase (self : str , __a : str ):
return self.vocab.get(__a , self.vocab.get(self.unk_token ) )
def _lowercase (self : int , __a : str ):
return self.decoder.get(__a )
def _lowercase (self : Tuple , __a : str , __a : Optional[str] = None ):
if not os.path.isdir(__a ):
logger.error("Vocabulary path ({}) should be a directory".format(__a ) )
return
UpperCAmelCase_ = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=__a , ensure_ascii=__a ) + "\n" )
return (vocab_file,)
| 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__snake_case = None
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
__snake_case = {
'''google/rembert''': 256,
}
__snake_case = '''▁'''
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : Optional[Any] = VOCAB_FILES_NAMES
A_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : List[Any] = RemBertTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , **__UpperCAmelCase , ) -> List[Any]:
# Mask token behave like a normal word, i.e. include the space before it
_a = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , )
_a = do_lower_case
_a = remove_space
_a = keep_accents
_a = vocab_file
_a = False if not self.vocab_file else True
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(__UpperCAmelCase ) )
return
_a = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
return (out_vocab_file,)
| 320 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
lowerCamelCase : Optional[Any] = logging.getLogger(__name__)
@dataclass(frozen=lowercase_ )
class __lowerCAmelCase :
'''simple docstring'''
lowerCAmelCase__ : str
lowerCAmelCase__ : str
lowerCAmelCase__ : Optional[str] = None
lowerCAmelCase__ : Optional[str] = None
lowerCAmelCase__ : Optional[str] = None
@dataclass(frozen=lowercase_ )
class __lowerCAmelCase :
'''simple docstring'''
lowerCAmelCase__ : List[int]
lowerCAmelCase__ : Optional[List[int]] = None
lowerCAmelCase__ : Optional[List[int]] = None
lowerCAmelCase__ : Optional[Union[int, float]] = None
lowerCAmelCase__ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : List[InputFeatures]
def __init__(self : Optional[int] , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : str , UpperCamelCase : Optional[int] = None , UpperCamelCase : List[Any]=False , UpperCamelCase : bool = False , ):
'''simple docstring'''
lowercase__ = hans_processors[task]()
lowercase__ = os.path.join(
UpperCamelCase , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(UpperCamelCase ) , UpperCamelCase , ) , )
lowercase__ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowercase__ ,lowercase__ = label_list[2], label_list[1]
lowercase__ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowercase__ = cached_features_file + '''.lock'''
with FileLock(UpperCamelCase ):
if os.path.exists(UpperCamelCase ) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}" )
lowercase__ = torch.load(UpperCamelCase )
else:
logger.info(f"Creating features from dataset file at {data_dir}" )
lowercase__ = (
processor.get_dev_examples(UpperCamelCase ) if evaluate else processor.get_train_examples(UpperCamelCase )
)
logger.info('''Training examples: %s''' , len(UpperCamelCase ) )
lowercase__ = hans_convert_examples_to_features(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
logger.info('''Saving features into cached file %s''' , UpperCamelCase )
torch.save(self.features , UpperCamelCase )
def __len__(self : Optional[int] ):
'''simple docstring'''
return len(self.features )
def __getitem__(self : Tuple , UpperCamelCase : str ):
'''simple docstring'''
return self.features[i]
def UpperCamelCase__ (self : str ):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class __lowerCAmelCase :
'''simple docstring'''
lowerCAmelCase__ : List[InputFeatures]
def __init__(self : List[Any] , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : str , UpperCamelCase : Optional[int] = 128 , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : bool = False , ):
'''simple docstring'''
lowercase__ = hans_processors[task]()
lowercase__ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowercase__ ,lowercase__ = label_list[2], label_list[1]
lowercase__ = label_list
lowercase__ = processor.get_dev_examples(UpperCamelCase ) if evaluate else processor.get_train_examples(UpperCamelCase )
lowercase__ = hans_convert_examples_to_features(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(UpperCamelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
lowercase__ = tf.data.Dataset.from_generator(
UpperCamelCase , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
return self.dataset
def __len__(self : List[Any] ):
'''simple docstring'''
return len(self.features )
def __getitem__(self : Union[str, Any] , UpperCamelCase : Tuple ):
'''simple docstring'''
return self.features[i]
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
return self.label_list
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Any ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(UpperCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' )
def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(UpperCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def UpperCamelCase__ (self : Dict ):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : int , UpperCamelCase : Optional[int] ):
'''simple docstring'''
lowercase__ = []
for i, line in enumerate(UpperCamelCase ):
if i == 0:
continue
lowercase__ = '''%s-%s''' % (set_type, line[0])
lowercase__ = line[5]
lowercase__ = line[6]
lowercase__ = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
lowercase__ = line[0]
examples.append(InputExample(guid=UpperCamelCase , text_a=UpperCamelCase , text_b=UpperCamelCase , label=UpperCamelCase , pairID=UpperCamelCase ) )
return examples
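# convert HANS InputExamples into tokenized, padded InputFeatures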
def _SCREAMING_SNAKE_CASE (A , A , A , A , ) -> int:
"""simple docstring"""
lowercase__ = {label: i for i, label in enumerate(A )}
lowercase__ = []
for ex_index, example in tqdm.tqdm(enumerate(A ) , desc='''convert examples to features''' ):
if ex_index % 10_000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
lowercase__ = tokenizer(
example.text_a , example.text_b , add_special_tokens=A , max_length=A , padding='''max_length''' , truncation=A , return_overflowing_tokens=A , )
lowercase__ = label_map[example.label] if example.label in label_map else 0
lowercase__ = int(example.pairID )
features.append(InputFeatures(**A , label=A , pairID=A ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f"guid: {example}" )
logger.info(f"features: {features[i]}" )
return features
lowerCamelCase : Tuple = {
'hans': 3,
}
lowerCamelCase : str = {
'hans': HansProcessor,
}
| 2 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 320 | 0 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : int = 1.5
A : Dict = int(factor * num_class_images )
A : Tuple = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=snake_case__ , aesthetic_weight=0.1 )
os.makedirs(F'{class_data_dir}/images' , exist_ok=snake_case__ )
if len(list(Path(F'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
return
while True:
A : Tuple = client.query(text=snake_case__ )
if len(snake_case__ ) >= factor * num_class_images or num_images > 1E4:
break
else:
A : Dict = int(factor * num_images )
A : Optional[int] = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=snake_case__ , aesthetic_weight=0.1 , )
A : Dict = 0
A : Optional[int] = 0
A : Tuple = tqdm(desc='''downloading real regularization images''' , total=snake_case__ )
with open(F'{class_data_dir}/caption.txt' , '''w''' ) as fa, open(F'{class_data_dir}/urls.txt' , '''w''' ) as fa, open(
F'{class_data_dir}/images.txt' , '''w''' ) as fa:
while total < num_class_images:
A : Optional[Any] = class_images[count]
count += 1
try:
A : Dict = requests.get(images['''url'''] )
if img.status_code == 200:
A : Tuple = Image.open(BytesIO(img.content ) )
with open(F'{class_data_dir}/images/{total}.jpg' , '''wb''' ) as f:
f.write(img.content )
fa.write(images['''caption'''] + '''\n''' )
fa.write(images['''url'''] + '''\n''' )
fa.write(F'{class_data_dir}/images/{total}.jpg' + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : int = argparse.ArgumentParser('''''' , add_help=snake_case__ )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=snake_case__ , type=snake_case__ )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=snake_case__ , type=snake_case__ )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=snake_case__ )
return parser.parse_args()
if __name__ == "__main__":
lowercase : Optional[int] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 3 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowerCamelCase ( a__ ):
'''simple docstring'''
@require_torch
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_a = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(__UpperCAmelCase )
BertModel.from_pretrained(__UpperCAmelCase )
BertTokenizer.from_pretrained(__UpperCAmelCase )
pipeline(task='''fill-mask''' , model=__UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_a = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_a = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(__UpperCAmelCase )
BertModel.from_pretrained(__UpperCAmelCase )
BertTokenizer.from_pretrained(__UpperCAmelCase )
pipeline(task='''fill-mask''' , model=__UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
_a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
_a = '''
from transformers import pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
_a = self.get_env()
_a = '''1'''
_a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
_a = '''
from transformers import AutoModel
'''
_a = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
| 320 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Union[str, Any] = '''speech_to_text_2'''
lowerCamelCase : Any = ['''past_key_values''']
lowerCamelCase : Optional[Any] = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[Any]=1_0_0_0_0 , UpperCAmelCase__ : int=6 , UpperCAmelCase__ : Optional[Any]=2_0_4_8 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str="relu" , UpperCAmelCase__ : Any=2_5_6 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : int=1_0_2_4 , **UpperCAmelCase__ : Optional[Any] , ) -> Dict:
lowerCAmelCase = vocab_size
lowerCAmelCase = d_model
lowerCAmelCase = decoder_ffn_dim
lowerCAmelCase = decoder_layers
lowerCAmelCase = decoder_attention_heads
lowerCAmelCase = dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = activation_function
lowerCAmelCase = init_std
lowerCAmelCase = decoder_layerdrop
lowerCAmelCase = use_cache
lowerCAmelCase = decoder_layers
lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase = max_target_positions
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
| 4 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : str = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Any = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Dict = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Tuple = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Any = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(cls , ['''flax'''] )
| 320 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase__ ( lowerCAmelCase):
def __init__(self , UpperCAmelCase ) -> int:
_lowercase =data
def __iter__(self ) -> Optional[Any]:
for element in self.data:
yield element
def UpperCAmelCase_ ( __snake_case=True ) -> Tuple:
"""simple docstring"""
_lowercase =Accelerator(even_batches=__snake_case )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
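# build a small map-style or iterable dataset and wrap it in a DataLoader prepared by the accelerator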
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case = False ) -> List[Any]:
"""simple docstring"""
if iterable:
_lowercase =DummyIterableDataset(torch.as_tensor(range(__snake_case ) ) )
else:
_lowercase =TensorDataset(torch.as_tensor(range(__snake_case ) ) )
_lowercase =DataLoader(__snake_case , batch_size=__snake_case )
_lowercase =accelerator.prepare(__snake_case )
return dl
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> str:
"""simple docstring"""
_lowercase =create_dataloader(accelerator=__snake_case , dataset_size=__snake_case , batch_size=__snake_case )
_lowercase =[len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def UpperCAmelCase_ ( ) -> Tuple:
"""simple docstring"""
_lowercase =create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__snake_case , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__snake_case , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def UpperCAmelCase_ ( ) -> Optional[int]:
"""simple docstring"""
_lowercase =create_accelerator(even_batches=__snake_case )
verify_dataloader_batch_sizes(
__snake_case , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__snake_case , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def UpperCAmelCase_ ( ) -> Dict:
"""simple docstring"""
_lowercase =create_accelerator(even_batches=__snake_case )
_lowercase =torch.nn.Linear(1 , 1 )
_lowercase =accelerator.prepare(__snake_case )
_lowercase =create_dataloader(__snake_case , dataset_size=3 , batch_size=1 )
_lowercase =[]
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__snake_case ):
_lowercase =ddp_model(batch[0].float() )
_lowercase =output.sum()
loss.backward()
batch_idxs.append(__snake_case )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def UpperCAmelCase_ ( __snake_case ) -> Union[str, Any]:
"""simple docstring"""
with warnings.catch_warnings(record=__snake_case ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __snake_case )
assert "only supported for multi-GPU" in str(w[-1].message )
def UpperCAmelCase_ ( ) -> Any:
"""simple docstring"""
_lowercase =True
_lowercase =False
_lowercase =create_accelerator(even_batches=__snake_case )
_lowercase =torch.nn.Linear(1 , 1 )
_lowercase =accelerator.prepare(__snake_case )
_lowercase =create_dataloader(__snake_case , dataset_size=3 , batch_size=1 )
_lowercase =create_dataloader(__snake_case , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__snake_case ):
_lowercase =train_dl.batch_sampler.even_batches
_lowercase =valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def UpperCAmelCase_ ( ) -> int:
"""simple docstring"""
_lowercase =True
_lowercase =False
_lowercase =create_accelerator(even_batches=__snake_case )
_lowercase =torch.nn.Linear(1 , 1 )
_lowercase =accelerator.prepare(__snake_case )
create_dataloader(__snake_case , dataset_size=3 , batch_size=1 , iterable=__snake_case )
_lowercase =create_dataloader(__snake_case , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('''ignore''' )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__snake_case ):
_lowercase =batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def UpperCAmelCase_ ( ) -> int:
"""simple docstring"""
_lowercase =create_accelerator()
_lowercase =torch.nn.Linear(1 , 1 )
_lowercase =accelerator.prepare(__snake_case )
create_dataloader(__snake_case , dataset_size=3 , batch_size=1 , iterable=__snake_case )
with warnings.catch_warnings(record=__snake_case ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__snake_case ):
pass
assert issubclass(w[-1].category , __snake_case )
assert "only supported for map-style datasets" in str(w[-1].message )
def UpperCAmelCase_ ( ) -> List[Any]:
"""simple docstring"""
_lowercase =create_accelerator()
accelerator.print('''Test that even_batches variable ensures uniform batches across processes''' )
test_default_ensures_even_batch_sizes()
accelerator.print('''Run tests with even_batches disabled''' )
test_can_disable_even_batches()
accelerator.print('''Test joining uneven inputs''' )
test_can_join_uneven_inputs()
accelerator.print('''Test overriding even_batches when joining uneven inputs''' )
test_join_can_override_even_batches()
accelerator.print('''Test overriding even_batches for mixed dataloader types''' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('''Test overriding even_batches raises a warning for iterable dataloaders''' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('''Test join with non DDP distributed raises warning''' )
_lowercase =accelerator.state.distributed_type
_lowercase =DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__snake_case )
_lowercase =original_state
if __name__ == "__main__":
main()
| 5 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__snake_case = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__snake_case = '''\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
'''
__snake_case = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
def remove_articles(_lowerCAmelCase : Optional[int] ):
_a = re.compile(R'''\b(a|an|the)\b''', re.UNICODE )
return re.sub(_lowerCAmelCase, ''' ''', _lowerCAmelCase )
def white_space_fix(_lowerCAmelCase : Tuple ):
return " ".join(text.split() )
def remove_punc(_lowerCAmelCase : Tuple ):
_a = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowerCAmelCase : List[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) )
def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) )
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Any ):
"""simple docstring"""
_a = [any(compute_exact(_lowerCAmelCase, _lowerCAmelCase ) for ref in refs ) for pred, refs in zip(_lowerCAmelCase, _lowerCAmelCase )]
return (sum(_lowerCAmelCase ) / len(_lowerCAmelCase )) * 1_00
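# SARI components for one n-gram order: keep F1, deletion precision and addition score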
def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : List[Any], _lowerCAmelCase : str, _lowerCAmelCase : str ):
"""simple docstring"""
_a = [rgram for rgrams in rgramslist for rgram in rgrams]
_a = Counter(_lowerCAmelCase )
_a = Counter(_lowerCAmelCase )
_a = Counter()
for sgram, scount in sgramcounter.items():
_a = scount * numref
_a = Counter(_lowerCAmelCase )
_a = Counter()
for cgram, ccount in cgramcounter.items():
_a = ccount * numref
# KEEP
_a = sgramcounter_rep & cgramcounter_rep
_a = keepgramcounter_rep & rgramcounter
_a = sgramcounter_rep & rgramcounter
_a = 0
_a = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(_lowerCAmelCase ) > 0:
_a = keeptmpscorea / len(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_a = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_a = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_a = sgramcounter_rep - cgramcounter_rep
_a = delgramcounter_rep - rgramcounter
_a = sgramcounter_rep - rgramcounter
_a = 0
_a = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
if len(_lowerCAmelCase ) > 0:
_a = deltmpscorea / len(_lowerCAmelCase )
# ADDITION
_a = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
_a = set(_lowerCAmelCase ) & set(_lowerCAmelCase )
_a = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
_a = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(_lowerCAmelCase ) > 0:
_a = addtmpscore / len(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
_a = addtmpscore / len(_lowerCAmelCase )
_a = 0
if addscore_precision > 0 or addscore_recall > 0:
_a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
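# sentence-level SARI: average the keep/delete/add components over 1-grams to 4-grams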
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Dict, _lowerCAmelCase : Any ):
"""simple docstring"""
_a = len(_lowerCAmelCase )
_a = ssent.split(''' ''' )
_a = csent.split(''' ''' )
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
for rsent in rsents:
_a = rsent.split(''' ''' )
_a = []
_a = []
_a = []
ragramslist.append(_lowerCAmelCase )
for i in range(0, len(_lowerCAmelCase ) - 1 ):
if i < len(_lowerCAmelCase ) - 1:
_a = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 2:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 3:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(_lowerCAmelCase )
ragramslist.append(_lowerCAmelCase )
ragramslist.append(_lowerCAmelCase )
ragramslist.append(_lowerCAmelCase )
for i in range(0, len(_lowerCAmelCase ) - 1 ):
if i < len(_lowerCAmelCase ) - 1:
_a = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 2:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 3:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(_lowerCAmelCase )
for i in range(0, len(_lowerCAmelCase ) - 1 ):
if i < len(_lowerCAmelCase ) - 1:
_a = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 2:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 3:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(_lowerCAmelCase )
((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
_a = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_a = sum([delascore, delascore, delascore, delascore] ) / 4
_a = sum([addascore, addascore, addascore, addascore] ) / 4
_a = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
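# SARI (Xu et al., 2016, https://www.aclweb.org/anthology/Q16-1029.pdf) scores a
# simplification by how well it KEEPs, DELETEs and ADDs n-grams relative to the source
# and the references: keep and add use an F1 of precision/recall, deletion uses
# precision only, each component is averaged over n-gram orders 1-4, and the final
# sentence score is the mean of the three components.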
def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : bool = True, _lowerCAmelCase : str = "13a", _lowerCAmelCase : bool = True ):
"""simple docstring"""
if lowercase:
_a = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_a = sacrebleu.metrics.bleu._get_tokenizer(_lowerCAmelCase )()(_lowerCAmelCase )
else:
_a = sacrebleu.TOKENIZERS[tokenizer]()(_lowerCAmelCase )
elif tokenizer == "moses":
_a = sacremoses.MosesTokenizer().tokenize(_lowerCAmelCase, return_str=_lowerCAmelCase, escape=_lowerCAmelCase )
elif tokenizer == "penn":
_a = sacremoses.MosesTokenizer().penn_tokenize(_lowerCAmelCase, return_str=_lowerCAmelCase )
else:
_a = sentence
if not return_str:
_a = normalized_sent.split()
return normalized_sent
def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
if not (len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == len(_lowerCAmelCase )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
_a = 0
for src, pred, refs in zip(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ):
sari_score += SARIsent(normalize(_lowerCAmelCase ), normalize(_lowerCAmelCase ), [normalize(_lowerCAmelCase ) for sent in refs] )
_a = sari_score / len(_lowerCAmelCase )
return 1_00 * sari_score
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Tuple, _lowerCAmelCase : Any="exp", _lowerCAmelCase : Tuple=None, _lowerCAmelCase : Union[str, Any]=False, _lowerCAmelCase : Optional[Any]=False, _lowerCAmelCase : List[str]=False, ):
"""simple docstring"""
_a = len(references[0] )
if any(len(_lowerCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_a = [[refs[i] for refs in references] for i in range(_lowerCAmelCase )]
_a = sacrebleu.corpus_bleu(
_lowerCAmelCase, _lowerCAmelCase, smooth_method=_lowerCAmelCase, smooth_value=_lowerCAmelCase, force=_lowerCAmelCase, lowercase=_lowerCAmelCase, use_effective_order=_lowerCAmelCase, )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
_a = {}
result.update({'''sari''': compute_sari(sources=__UpperCAmelCase , predictions=__UpperCAmelCase , references=__UpperCAmelCase )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=__UpperCAmelCase , references=__UpperCAmelCase )} )
result.update({'''exact''': compute_em(predictions=__UpperCAmelCase , references=__UpperCAmelCase )} )
return result
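# Illustrative only: a minimal sketch of how the combined metric above (SARI + sacreBLEU
# + exact match) could be exercised directly, assuming the module-level helpers keep the
# names they are called by inside the class (`compute_sari`, `compute_sacrebleu`,
# `compute_em`); the example sentences are placeholders.
#
#   sources = ["About 95 species are currently accepted ."]
#   predictions = ["About 95 you now get in ."]
#   references = [["About 95 species are currently known ."]]
#   print(compute_sari(sources=sources, predictions=predictions, references=references))
#   print(compute_sacrebleu(predictions=predictions, references=references))
#   print(compute_em(predictions=predictions, references=references))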
| 320 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __A( a ):
snake_case_ = (PNDMScheduler,)
snake_case_ = (('''num_inference_steps''', 5_0),)
def SCREAMING_SNAKE_CASE_ ( self , **_snake_case ) -> str:
'''simple docstring'''
__a = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_snake_case )
return config
def SCREAMING_SNAKE_CASE_ ( self , _snake_case=0 , **_snake_case ) -> Any:
'''simple docstring'''
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop('''num_inference_steps''' , _snake_case )
__a = self.dummy_sample
__a = 0.1 * sample
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config(**_snake_case )
__a = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals
__a = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case )
__a = scheduler_class.from_pretrained(_snake_case )
new_scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals
__a = dummy_past_residuals[:]
__a = scheduler.step_prk(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
__a = new_scheduler.step_prk(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__a = scheduler.step_plms(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
__a = new_scheduler.step_plms(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self , _snake_case=0 , **_snake_case ) -> Union[str, Any]:
'''simple docstring'''
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop('''num_inference_steps''' , _snake_case )
__a = self.dummy_sample
__a = 0.1 * sample
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals (must be after setting timesteps)
__a = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case )
__a = scheduler_class.from_pretrained(_snake_case )
# copy over dummy past residuals
new_scheduler.set_timesteps(_snake_case )
                # copy over dummy past residuals (must be after setting timesteps)
__a = dummy_past_residuals[:]
__a = scheduler.step_prk(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
__a = new_scheduler.step_prk(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__a = scheduler.step_plms(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
__a = new_scheduler.step_plms(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE_ ( self , **_snake_case ) -> Tuple:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(**_snake_case )
__a = scheduler_class(**_snake_case )
__a = 10
__a = self.dummy_model()
__a = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case )
for i, t in enumerate(scheduler.prk_timesteps ):
__a = model(_snake_case , _snake_case )
__a = scheduler.step_prk(_snake_case , _snake_case , _snake_case ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
__a = model(_snake_case , _snake_case )
__a = scheduler.step_plms(_snake_case , _snake_case , _snake_case ).prev_sample
return sample
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop('''num_inference_steps''' , _snake_case )
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case )
__a = self.dummy_sample
__a = 0.1 * sample
if num_inference_steps is not None and hasattr(_snake_case , '''set_timesteps''' ):
scheduler.set_timesteps(_snake_case )
elif num_inference_steps is not None and not hasattr(_snake_case , '''set_timesteps''' ):
__a = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__a = dummy_past_residuals[:]
__a = scheduler.step_prk(_snake_case , 0 , _snake_case , **_snake_case ).prev_sample
__a = scheduler.step_prk(_snake_case , 1 , _snake_case , **_snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__a = scheduler.step_plms(_snake_case , 0 , _snake_case , **_snake_case ).prev_sample
__a = scheduler.step_plms(_snake_case , 1 , _snake_case , **_snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_snake_case )
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(steps_offset=1 )
__a = scheduler_class(**_snake_case )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=_snake_case , beta_end=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
__a = 27
for scheduler_class in self.scheduler_classes:
__a = self.dummy_sample
__a = 0.1 * sample
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
            # before the power-of-3 fix, this would error on the first step, so we only need to check the first two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
__a = scheduler.step_prk(_snake_case , _snake_case , _snake_case ).prev_sample
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
with self.assertRaises(_snake_case ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = self.full_loop()
__a = torch.sum(torch.abs(_snake_case ) )
__a = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 198.1318 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = self.full_loop(prediction_type='''v_prediction''' )
__a = torch.sum(torch.abs(_snake_case ) )
__a = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 67.3986 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = self.full_loop(set_alpha_to_one=_snake_case , beta_start=0.01 )
__a = torch.sum(torch.abs(_snake_case ) )
__a = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 230.0399 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = self.full_loop(set_alpha_to_one=_snake_case , beta_start=0.01 )
__a = torch.sum(torch.abs(_snake_case ) )
__a = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 186.9482 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
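# Illustrative only (not part of the test suite): outside the test harness, a
# PNDMScheduler is typically driven through its generic `step` API, which dispatches to
# the Runge-Kutta and linear multistep updates exercised above; `model` here stands for
# a hypothetical noise-prediction network.
#
#   scheduler = PNDMScheduler(num_train_timesteps=1_000, beta_schedule="linear")
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = model(sample, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample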
| 6 |
"""simple docstring"""
def A_ ( _lowerCAmelCase : int = 50 ):
"""simple docstring"""
_a = [1] * (length + 1)
for row_length in range(3, length + 1 ):
for block_length in range(3, row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
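# Worked check (hedged): the recurrence above counts the ways of placing red blocks of
# length >= 3, separated by at least one empty square, on a row of the given length
# (Project Euler problem 114); for a row of length 7 there are exactly 17 arrangements,
# so, with `ways_number` bound to the list built at the top of the function,
# solution(7) is expected to return 17.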
| 320 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase_ = {
"configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
"tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAdaptiveEmbedding",
"TFTransfoXLForSequenceClassification",
"TFTransfoXLLMHeadModel",
"TFTransfoXLMainLayer",
"TFTransfoXLModel",
"TFTransfoXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
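# Illustrative only: with the `_LazyModule` wrapper installed as this package's module,
# a statement such as `from transformers.models.transfo_xl import TransfoXLModel`
# resolves the attribute on first access, so the heavy torch / TensorFlow imports above
# only run when a backend-specific class is actually requested.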
| 7 |
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__snake_case = logging.get_logger('''transformers.models.speecht5''')
__snake_case = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
__snake_case = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
__snake_case = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
__snake_case = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
__snake_case = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
__snake_case = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
__snake_case = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
__snake_case = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
__snake_case = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__snake_case = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__snake_case = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__snake_case = []
__snake_case = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Tuple, _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
for attribute in key.split('''.''' ):
_a = getattr(_lowerCAmelCase, _lowerCAmelCase )
if weight_type is not None:
_a = getattr(_lowerCAmelCase, _lowerCAmelCase ).shape
else:
_a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
elif weight_type == "running_mean":
_a = value
elif weight_type == "running_var":
_a = value
elif weight_type == "num_batches_tracked":
_a = value
else:
_a = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Tuple ):
"""simple docstring"""
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_a , _a = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def A_ ( _lowerCAmelCase : Any, _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : int ):
"""simple docstring"""
_a = []
if task == "s2t":
_a = hf_model.speechta.encoder.prenet.feature_encoder
_a = MAPPING_S2T
_a = IGNORE_KEYS_S2T
elif task == "t2s":
_a = None
_a = MAPPING_T2S
_a = IGNORE_KEYS_T2S
elif task == "s2s":
_a = hf_model.speechta.encoder.prenet.feature_encoder
_a = MAPPING_S2S
_a = IGNORE_KEYS_S2S
else:
raise ValueError(f'Unsupported task: {task}' )
for name, value in fairseq_dict.items():
if should_ignore(_lowerCAmelCase, _lowerCAmelCase ):
logger.info(f'{name} was ignored' )
continue
_a = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, hf_model.config.feat_extract_norm == '''group''', )
_a = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
_a , _a = key.split('''.*.''' )
if prefix in name and suffix in name:
_a = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
_a = True
if "*" in mapped_key:
_a = name.split(_lowerCAmelCase )[0].split('''.''' )[-2]
_a = mapped_key.replace('''*''', _lowerCAmelCase )
if "weight_g" in name:
_a = '''weight_g'''
elif "weight_v" in name:
_a = '''weight_v'''
elif "bias" in name:
_a = '''bias'''
elif "weight" in name:
_a = '''weight'''
elif "running_mean" in name:
_a = '''running_mean'''
elif "running_var" in name:
_a = '''running_var'''
elif "num_batches_tracked" in name:
_a = '''num_batches_tracked'''
else:
_a = None
set_recursively(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Dict, _lowerCAmelCase : List[Any], _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_a = full_name.split('''conv_layers.''' )[-1]
_a = name.split('''.''' )
_a = int(items[0] )
_a = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
_a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
_a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
_a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
_a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def A_ ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Dict, _lowerCAmelCase : List[Any]=None, _lowerCAmelCase : List[str]=None, _lowerCAmelCase : int=None, ):
"""simple docstring"""
if config_path is not None:
_a = SpeechTaConfig.from_pretrained(_lowerCAmelCase )
else:
_a = SpeechTaConfig()
if task == "s2t":
_a = config.max_text_positions
_a = SpeechTaForSpeechToText(_lowerCAmelCase )
elif task == "t2s":
_a = 18_76
_a = 6_00
_a = config.max_speech_positions
_a = SpeechTaForTextToSpeech(_lowerCAmelCase )
elif task == "s2s":
_a = 18_76
_a = config.max_speech_positions
_a = SpeechTaForSpeechToSpeech(_lowerCAmelCase )
else:
raise ValueError(f'Unknown task name: {task}' )
if vocab_path:
_a = SpeechTaTokenizer(_lowerCAmelCase, model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
_a = AddedToken('''<mask>''', lstrip=_lowerCAmelCase, rstrip=_lowerCAmelCase )
_a = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
_a = SpeechTaFeatureExtractor()
_a = SpeechTaProcessor(tokenizer=_lowerCAmelCase, feature_extractor=_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
_a = torch.load(_lowerCAmelCase )
recursively_load_weights(fairseq_checkpoint['''model'''], _lowerCAmelCase, _lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(_lowerCAmelCase )
model.push_to_hub(_lowerCAmelCase )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
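# Example invocation (hedged; the script name and paths are placeholders, the flags come
# from the argparse definition above):
#
#   python convert_speecht5_checkpoint.py \
#       --task s2t \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --vocab_path /path/to/spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_s2t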
| 320 | 0 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'''vocab_file''': '''spiece.model'''}
lowerCAmelCase_ = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
lowerCAmelCase_ = {
'''google/bigbird-roberta-base''': 40_96,
'''google/bigbird-roberta-large''': 40_96,
'''google/bigbird-base-trivia-itc''': 40_96,
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : List[Any] = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE : List[int] = []
def __init__( self : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict="<unk>" , _UpperCamelCase : List[str]="<s>" , _UpperCamelCase : Tuple="</s>" , _UpperCamelCase : Any="<pad>" , _UpperCamelCase : Any="[SEP]" , _UpperCamelCase : Optional[Any]="[MASK]" , _UpperCamelCase : Any="[CLS]" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Dict , ) ->None:
snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else bos_token
snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else eos_token
snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else unk_token
snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else pad_token
snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else cls_token
snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else sep_token
        # Mask token behaves like a normal word, i.e. include the space before it
snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , sep_token=_UpperCamelCase , mask_token=_UpperCamelCase , cls_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@property
def snake_case__( self : str ) ->List[Any]:
return self.sp_model.get_piece_size()
def snake_case__( self : int ) ->Union[str, Any]:
snake_case_ = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ) ->Any:
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self : str , _UpperCamelCase : List[Any] ) ->List[str]:
snake_case_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case__( self : Optional[int] , _UpperCamelCase : str ) ->List[str]:
return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
def snake_case__( self : str , _UpperCamelCase : List[str] ) ->Tuple:
return self.sp_model.piece_to_id(_UpperCamelCase )
def snake_case__( self : Union[str, Any] , _UpperCamelCase : str ) ->List[Any]:
snake_case_ = self.sp_model.IdToPiece(_UpperCamelCase )
return token
def snake_case__( self : Dict , _UpperCamelCase : Optional[int] ) ->List[str]:
snake_case_ = []
snake_case_ = ''''''
snake_case_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCamelCase ) + token
snake_case_ = True
snake_case_ = []
else:
current_sub_tokens.append(_UpperCamelCase )
snake_case_ = False
out_string += self.sp_model.decode(_UpperCamelCase )
return out_string.strip()
def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : bool = False , _UpperCamelCase : bool = None , _UpperCamelCase : bool = True , **_UpperCamelCase : List[str] , ) ->str:
snake_case_ = kwargs.pop('''use_source_tokenizer''' , _UpperCamelCase )
snake_case_ = self.convert_ids_to_tokens(_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build the string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
snake_case_ = []
snake_case_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) )
snake_case_ = []
sub_texts.append(_UpperCamelCase )
else:
current_sub_text.append(_UpperCamelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
snake_case_ = re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(_UpperCamelCase ) )
else:
snake_case_ = ''''''.join(_UpperCamelCase )
snake_case_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
snake_case_ = self.clean_up_tokenization(_UpperCamelCase )
return clean_text
else:
return text
def snake_case__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
if not os.path.isdir(_UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , '''wb''' ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
def snake_case__( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case__( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) ->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCamelCase )) + [1]
return [1] + ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def snake_case__( self : List[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
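# Illustrative only: this SentencePiece tokenizer is normally reached through the
# upstream `BigBirdTokenizer` class name (assumed here) and used as
#   tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   ids = tok("Paris is the [MASK] of France.")["input_ids"]
#   text = tok.decode(ids)
# (the checkpoint name is taken from the vocab map defined above).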
| 8 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[Any] = 'decision_transformer'
A_ : Union[str, Any] = ['past_key_values']
A_ : str = {
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , __UpperCAmelCase=17 , __UpperCAmelCase=4 , __UpperCAmelCase=128 , __UpperCAmelCase=4096 , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=1024 , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=None , __UpperCAmelCase="relu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Optional[int]:
_a = state_dim
_a = act_dim
_a = hidden_size
_a = max_ep_len
_a = action_tanh
_a = vocab_size
_a = n_positions
_a = n_layer
_a = n_head
_a = n_inner
_a = activation_function
_a = resid_pdrop
_a = embd_pdrop
_a = attn_pdrop
_a = layer_norm_epsilon
_a = initializer_range
_a = scale_attn_weights
_a = use_cache
_a = scale_attn_by_inverse_layer_idx
_a = reorder_and_upcast_attn
_a = bos_token_id
_a = eos_token_id
super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
| 320 | 0 |
def _UpperCamelCase ( lowercase__ ):
if not isinstance(lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[str] = F'''Input value of [number={number}] must be an integer'''
raise TypeError(lowercase__ )
if number < 0:
return False
__SCREAMING_SNAKE_CASE : Any = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
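# Worked examples: 5 * 5 = 25 and 6 * 6 = 36 end in 5 and 6, so the check returns True
# for 5 and 6; 7 * 7 = 49 does not end in 7, so it returns False; 76 * 76 = 5776 ends in
# 76, so 76 passes as well (the classic automorphic-number property).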
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__snake_case = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[str] = ['pixel_values']
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> None:
super().__init__(**__UpperCAmelCase )
_a = size if size is not None else {'''shortest_edge''': 224}
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
_a = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' )
_a = do_resize
_a = size
_a = resample
_a = do_center_crop
_a = crop_size
_a = do_rescale
_a = rescale_factor
_a = do_normalize
_a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_a = image_std if image_std is not None else OPENAI_CLIP_STD
_a = do_convert_rgb
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_a = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase )
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
_a = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Optional[Any]:
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image:
_a = do_resize if do_resize is not None else self.do_resize
_a = size if size is not None else self.size
_a = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase )
_a = resample if resample is not None else self.resample
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase )
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_a = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_a = [convert_to_rgb(__UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
_a = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
_a = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
if do_rescale:
_a = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_normalize:
_a = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images]
_a = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
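# Illustrative only: as a CLIP-style image processor, an instance would typically be
# applied through `__call__` (inherited from BaseImageProcessor, which forwards to the
# preprocessing method above); `pil_image` is a placeholder PIL.Image:
#
#   image_processor = <this image processor class>()
#   batch = image_processor(images=pil_image, return_tensors="pt")
#   pixel_values = batch["pixel_values"]   # (1, 3, 224, 224) with the default 224x224 crop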
| 320 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 320 | 0 |
import heapq
import sys
import numpy as np
lowerCAmelCase__ = tuple[int, int]
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self) -> Optional[int]:
_A : List[str] = []
_A : Optional[int] = set()
def _lowerCamelCase ( self) -> Any:
if not self.empty():
return self.elements[0][0]
else:
return float("inf")
def _lowerCamelCase ( self) -> List[str]:
return len(self.elements) == 0
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> List[Any]:
if item not in self.set:
heapq.heappush(self.elements , (priority, item))
self.set.add(__lowerCamelCase)
else:
# update
# print("update", item)
_A : Optional[Any] = []
((_A) , (_A)) : Dict = heapq.heappop(self.elements)
while x != item:
temp.append((pri, x))
((_A) , (_A)) : Optional[Any] = heapq.heappop(self.elements)
temp.append((priority, item))
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx))
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
if item in self.set:
self.set.remove(__lowerCamelCase)
_A : Any = []
((_A) , (_A)) : List[Any] = heapq.heappop(self.elements)
while x != item:
temp.append((pro, x))
((_A) , (_A)) : Union[str, Any] = heapq.heappop(self.elements)
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy))
def _lowerCamelCase ( self) -> Any:
return self.elements[0][1]
def _lowerCamelCase ( self) -> Optional[Any]:
((_A) , (_A)) : Any = heapq.heappop(self.elements)
self.set.remove(__lowerCamelCase)
return (priority, item)
def _UpperCAmelCase (UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
# euclidean distance
_A : Optional[Any] = np.array(UpperCamelCase__ )
_A : str = np.array(UpperCamelCase__ )
return np.linalg.norm(a - b )
def _UpperCAmelCase (UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
# integer division by time variable
return consistent_heuristic(UpperCamelCase__ , UpperCamelCase__ ) // t
def _UpperCAmelCase (UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _UpperCAmelCase (UpperCamelCase__ : TPos , UpperCamelCase__ : int , UpperCamelCase__ : TPos , UpperCamelCase__ : dict[TPos, float] ):
_A : str = g_function[start] + Wa * heuristics[i](UpperCamelCase__ , UpperCamelCase__ )
return ans
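# Priority used by the multi-heuristic search below: key(s, i) = g(s) + Wa * h_i(s, goal),
# i.e. the cost accumulated so far plus the i-th heuristic inflated by the weight Wa
# (the anchor queue uses i = 0, the consistent heuristic).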
def _UpperCAmelCase (UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
_A : List[Any] = np.chararray((n, n) )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
_A : Optional[int] = "*"
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (j, (n - 1) - i) in blocks:
_A : Dict = "#"
_A : Optional[int] = "-"
_A : Union[str, Any] = back_pointer[goal]
while x != start:
((_A) , (_A)) : Optional[Any] = x
# print(x)
_A : Union[str, Any] = "-"
_A : List[Any] = back_pointer[x]
_A : int = "-"
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
_A : Optional[Any] = back_pointer[goal]
while x != start:
print(UpperCamelCase__ , end=" " )
_A : List[Any] = back_pointer[x]
print(UpperCamelCase__ )
sys.exit()
def _UpperCAmelCase (UpperCamelCase__ : TPos ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Any , ):
for itera in range(UpperCamelCase__ ):
open_list[itera].remove_element(UpperCamelCase__ )
# print("s", s)
# print("j", j)
((_A) , (_A)) : int = s
_A : Dict = (x - 1, y)
_A : Any = (x + 1, y)
_A : Dict = (x, y + 1)
_A : Union[str, Any] = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(UpperCamelCase__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(UpperCamelCase__ )
_A : Dict = -1
_A : List[str] = float("inf" )
if valid(UpperCamelCase__ ) and g_function[neighbours] > g_function[s] + 1:
_A : Union[str, Any] = g_function[s] + 1
_A : int = s
if neighbours not in close_list_anchor:
open_list[0].put(UpperCamelCase__ , key(UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ) )
if neighbours not in close_list_inad:
for var in range(1 , UpperCamelCase__ ):
if key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) <= Wa * key(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ):
open_list[j].put(
UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
def _UpperCAmelCase ():
_A : Union[str, Any] = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
lowerCAmelCase__ = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
lowerCAmelCase__ = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
lowerCAmelCase__ = make_common_ground()
lowerCAmelCase__ = blocks_blk
# hyper parameters
lowerCAmelCase__ = 1
lowerCAmelCase__ = 1
lowerCAmelCase__ = 20
lowerCAmelCase__ = 3 # one consistent and two other inconsistent
# start and end destination
lowerCAmelCase__ = (0, 0)
lowerCAmelCase__ = (n - 1, n - 1)
lowerCAmelCase__ = 1
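# Added note (not part of the original file): in Multi-Heuristic A* the anchor queue is ordered by
# the consistent heuristic while the other queues use the inadmissible ones; an inadmissible queue
# is only expanded while its minimum key stays within the first weight (and the goal cost within
# the second weight) times the anchor's, which is what the two unit weights defined above control.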
def multi_a_star(start: TPos , goal: TPos , n_heuristic: int ):
_A : Optional[int] = {start: 0, goal: float("inf" )}
_A : int = {start: -1, goal: -1}
_A : Dict = []
_A : Dict = set()
for i in range(UpperCamelCase__ ):
open_list.append(PriorityQueue() )
open_list[i].put(UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
_A : list[int] = []
_A : list[int] = []
while open_list[0].minkey() < float("inf" ):
for i in range(1 , UpperCamelCase__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
_A , _A : Tuple = open_list[i].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_inad.append(UpperCamelCase__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
_A : Dict = open_list[0].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_anchor.append(UpperCamelCase__ )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(UpperCamelCase__ ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 11 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
__snake_case = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
__snake_case = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def A_ ( ):
"""simple docstring"""
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2''', '''rougeL'''] )
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2'''] )
assert (
pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean()
)
def A_ ( ):
"""simple docstring"""
_a = '''rougeLsum'''
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k]
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k]
assert score > score_no_sep
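# Added note (not part of the original test file): rougeLsum scores summaries sentence by sentence,
# and those sentence boundaries are only visible to it when the candidate/reference strings contain
# newline separators; that is why the newline-separated variant is expected to beat the joined one
# in the assertion above.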
def A_ ( ):
"""simple docstring"""
_a = ['''rouge1''', '''rouge2''', '''rougeL''']
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase )
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase )
assert score_sep == score_no_sep
def A_ ( ):
"""simple docstring"""
_a = [
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
]
_a = [
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
assert calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase ) == calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase )
def A_ ( ):
"""simple docstring"""
_a = [
'''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
]
_a = [
''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''], newline_sep=_lowerCAmelCase )['''rougeLsum''']
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''] )['''rougeLsum''']
assert new_score > prev_score
def A_ ( ):
"""simple docstring"""
_a = Path('''examples/seq2seq/test_data/wmt_en_ro''' )
_a = calculate_rouge_path(data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ) )
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
_a = calculate_rouge_path(
data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ), bootstrap_aggregation=_lowerCAmelCase )
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
| 320 | 0 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
UpperCAmelCase_ = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
UpperCAmelCase_ = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
UpperCAmelCase_ = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
UpperCAmelCase_ = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
UpperCAmelCase_ = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__( datasets.Metric):
def lowerCAmelCase__ ( self: Dict ):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any]=[1, 10, 1_00] , UpperCamelCase_: str=4 , UpperCamelCase_: Optional[Any]=3.0 ):
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=UpperCamelCase_ ) as executor:
__lowerCamelCase = []
__lowerCamelCase = Counter()
__lowerCamelCase = 0
__lowerCamelCase = defaultdict(UpperCamelCase_ )
for task_id, (candidates, test_case) in enumerate(zip(UpperCamelCase_ , UpperCamelCase_ ) ):
for candidate in candidates:
__lowerCamelCase = candidate + """\n""" + test_case
__lowerCamelCase = (test_program, timeout, task_id, completion_id[task_id])
__lowerCamelCase = executor.submit(UpperCamelCase_ , *UpperCamelCase_ )
futures.append(UpperCamelCase_ )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(UpperCamelCase_ ):
__lowerCamelCase = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
__lowerCamelCase, __lowerCamelCase = [], []
for result in results.values():
result.sort()
__lowerCamelCase = [r[1]["""passed"""] for r in result]
total.append(len(UpperCamelCase_ ) )
correct.append(sum(UpperCamelCase_ ) )
__lowerCamelCase = np.array(UpperCamelCase_ )
__lowerCamelCase = np.array(UpperCamelCase_ )
__lowerCamelCase = k
__lowerCamelCase = {F'pass@{k}': estimate_pass_at_k(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def estimate_pass_at_k(num_samples , num_correct , k ):
    '''simple docstring'''
    def estimator(n: int , c: int , k: int ) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
    if isinstance(num_samples , int ):
        num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
    else:
        assert len(num_samples ) == len(num_correct )
        num_samples_it = iter(num_samples )
    return np.array([estimator(int(n ) , int(c ) , k ) for n, c in zip(num_samples_it , num_correct )] )
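# Added sanity check (not part of the original metric file): with n=2 generated samples per
# problem, a problem with 1 passing sample has pass@1 = 1 - (1 - 1/2) = 0.5 and a problem with
# 2 passing samples has pass@1 = 1.0.
if __name__ == "__main__":
    _demo = estimate_pass_at_k(num_samples=[2, 2], num_correct=[1, 2], k=1)
    assert np.allclose(_demo, [0.5, 1.0])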
| 12 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
__snake_case = logging.get_logger(__name__)
class __lowerCamelCase ( a__ ):
'''simple docstring'''
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ChineseCLIPImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
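# Added usage note (not part of the original file): constructing the deprecated class above still
# works but emits a deprecation warning, so downstream code should instantiate
# ChineseCLIPImageProcessor (imported at the top of this module) directly instead.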
| 320 | 0 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
lowerCAmelCase : List[Any] = None
lowerCAmelCase : str = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Optional[Any] = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCAmelCase : str = {
"""t5-small""": 512,
"""t5-base""": 512,
"""t5-large""": 512,
"""t5-3b""": 512,
"""t5-11b""": 512,
}
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Any = VOCAB_FILES_NAMES
_UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : str = ['''input_ids''', '''attention_mask''']
_UpperCAmelCase : Optional[int] = TaTokenizer
_UpperCAmelCase : List[int] = []
def __init__( self : Any , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : int="</s>" , lowerCAmelCase__ : List[Any]="<unk>" , lowerCAmelCase__ : str="<pad>" , lowerCAmelCase__ : List[Any]=100 , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Tuple , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE_: Optional[int] = [F"<extra_id_{i}>" for i in range(lowerCAmelCase__)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
SCREAMING_SNAKE_CASE_: List[str] = len(set(filter(lambda lowerCAmelCase__: bool("extra_id_" in str(lowerCAmelCase__)) , lowerCAmelCase__)))
if extra_tokens != extra_ids:
raise ValueError(
F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens")
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , extra_ids=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Dict = vocab_file
SCREAMING_SNAKE_CASE_: Dict = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE_: List[str] = extra_ids
@staticmethod
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any):
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
SCREAMING_SNAKE_CASE_: List[str] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F" {pretrained_model_name_or_path} automatically truncating your input to"
F" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
F" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , lowerCAmelCase__ , )
return max_model_length
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(lowerCAmelCase__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
SCREAMING_SNAKE_CASE_: Tuple = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__):
copyfile(self.vocab_file , lowerCAmelCase__)
logger.info(F"Copy vocab file to {out_vocab_file}")
return (out_vocab_file,)
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
SCREAMING_SNAKE_CASE_: Optional[int] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
SCREAMING_SNAKE_CASE_: Optional[int] = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
SCREAMING_SNAKE_CASE_: List[Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def _SCREAMING_SNAKE_CASE ( self : List[str]):
return list(
set(filter(lambda lowerCAmelCase__: bool(re.search(R"<extra_id_\d+>" , lowerCAmelCase__)) is not None , self.additional_special_tokens)))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
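# Added illustrative sketch (not part of the original file; the checkpoint name is only an
# example): the two helpers above expose T5's <extra_id_*> sentinel tokens, roughly:
#
#   from transformers import T5TokenizerFast
#   tok = T5TokenizerFast.from_pretrained("t5-small")
#   sentinels = tok.get_sentinel_tokens()        # e.g. ['<extra_id_0>', '<extra_id_1>', ...]
#   sentinel_ids = tok.get_sentinel_token_ids()  # their vocabulary ids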
| 13 |
"""simple docstring"""
from __future__ import annotations
def A_ ( stress : float, tangential_force : float, area : float, ):
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
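    # Added worked example (not part of the original file): shear stress is tau = F / A, so a
    # 25 N tangential force over 0.5 m^2 gives 25 / 0.5 = 50 Pa; passing stress=0 asks the
    # function above to solve for that missing quantity.
    assert A_(0, 25, 0.5) == ("stress", 50.0)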
| 320 | 0 |
from __future__ import annotations
ELECTRON_CHARGE = 1.60_21E-19  # units = C
def SCREAMING_SNAKE_CASE ( conductivity: float , electron_conc: float , mobility: float , ) -> tuple[str, float]:
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
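    # Added worked example (not part of the original file): conductivity is sigma = q * n * mu,
    # so n = 2.5e19 carriers/m^3 with mu = 0.1 m^2/(V*s) gives sigma = 1.6021e-19 * 2.5e19 * 0.1
    # ~= 0.4 S/m; passing conductivity=0 asks the function above to solve for it.
    _label, _sigma = SCREAMING_SNAKE_CASE(0, 2.5e19, 0.1)
    assert _label == "conductivity" and abs(_sigma - 0.400525) < 1e-6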
| 14 |
"""simple docstring"""
def solution():
    """simple docstring"""
    constant = []
    i = 1
    while len(constant ) < 1e6:
        constant.append(str(i ) )
        i += 1
    constant = ''''''.join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[9_99] )
* int(constant[99_99] )
* int(constant[9_99_99] )
* int(constant[99_99_99] )
)
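# Added note (not part of the original file): this solves Project Euler problem 40. The digits
# of the Champernowne constant at positions 1, 10, 100, ..., 1_000_000 are 1, 1, 5, 3, 7, 2, 1,
# so solution() returns 1 * 1 * 5 * 3 * 7 * 2 * 1 = 210.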
if __name__ == "__main__":
print(solution())
| 320 | 0 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def UpperCAmelCase ( a_ , a_ , a_ ) -> List[Any]:
"""simple docstring"""
__A = AutoConfig.from_pretrained(a_ )
__A = FlaxAutoModelForSeqaSeqLM.from_config(config=a_ )
__A = checkpoints.load_tax_checkpoint(a_ )
__A = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
__A = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
__A = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__A = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global]." )
# Encoder
for layer_index in range(config.num_layers ):
__A = F'''layers_{str(a_ )}'''
# Self-Attention
__A = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
__A = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
__A = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
__A = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__A = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
__A = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
__A = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
__A = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
__A = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
__A = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
__A = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
__A = flax_model.params["encoder"]["block"][str(a_ )]["layer"]
__A = tax_attention_key
__A = tax_attention_out
__A = tax_attention_query
__A = tax_attention_value
__A = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__A = tax_global_layer_norm
if split_mlp_wi:
__A = tax_mlp_wi_a
__A = tax_mlp_wi_a
else:
__A = tax_mlp_wi
__A = tax_mlp_wo
__A = tax_mlp_layer_norm
__A = flax_model_encoder_layer_block
# Only for layer 0:
__A = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
__A = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__A = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
__A = tax_encoder_global_rel_embedding
# Assigning
__A = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
__A = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
__A = F'''layers_{str(a_ )}'''
# Self-Attention
__A = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
__A = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
__A = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
__A = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
__A = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
__A = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
__A = tax_enc_dec_attention_module["key"]["kernel"]
__A = tax_enc_dec_attention_module["out"]["kernel"]
__A = tax_enc_dec_attention_module["query"]["kernel"]
__A = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
__A = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
__A = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
__A = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
__A = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
__A = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
__A = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
__A = flax_model.params["decoder"]["block"][str(a_ )]["layer"]
__A = tax_attention_key
__A = tax_attention_out
__A = tax_attention_query
__A = tax_attention_value
__A = tax_pre_attention_layer_norm
__A = tax_enc_dec_attention_key
__A = tax_enc_dec_attention_out
__A = tax_enc_dec_attention_query
__A = tax_enc_dec_attention_value
__A = tax_cross_layer_norm
if split_mlp_wi:
__A = tax_mlp_wi_a
__A = tax_mlp_wi_a
else:
__A = tax_mlp_wi
__A = tax_mlp_wo
__A = txa_mlp_layer_norm
__A = flax_model_decoder_layer_block
# Decoder Normalization
__A = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
__A = txa_decoder_norm
# Only for layer 0:
__A = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
__A = tax_decoder_rel_embedding
# Token Embeddings
__A = tax_model["target"]["token_embedder"]["embedding"]
__A = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
__A = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(a_ )
print("T5X Model was sucessfully converted!" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
SCREAMING_SNAKE_CASE :Tuple = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
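# Added usage note (not part of the original file; the script name and config are illustrative):
# the converter above is normally run from the command line, e.g.
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_dir \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./flax_dump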
| 15 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[str] = 'bart'
A_ : Optional[Any] = ['past_key_values']
A_ : Union[str, Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , __UpperCAmelCase=50265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ) -> Tuple:
_a = vocab_size
_a = max_position_embeddings
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = encoder_layerdrop
_a = decoder_layerdrop
_a = classifier_dropout
_a = use_cache
_a = encoder_layers
_a = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ):
_a = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'''The config can simply be saved and uploaded again to be fixed.''' )
class __lowerCamelCase ( a__ ):
'''simple docstring'''
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_a = {0: '''batch'''}
_a = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
_a = {0: '''batch''', 1: '''decoder_sequence'''}
_a = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_a , _a = self.num_layers
for i in range(__UpperCAmelCase ):
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
_a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_a = super().outputs
else:
_a = super(__UpperCAmelCase , self ).outputs
if self.use_past:
_a , _a = self.num_layers
for i in range(__UpperCAmelCase ):
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Generate decoder inputs
_a = seq_length if not self.use_past else 1
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_a = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
_a = dict(**__UpperCAmelCase , **__UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_a , _a = common_inputs['''input_ids'''].shape
_a = common_inputs['''decoder_input_ids'''].shape[1]
_a , _a = self.num_attention_heads
_a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a = decoder_seq_length + 3
_a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_a = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 )
_a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_a , _a = self.num_layers
_a = min(__UpperCAmelCase , __UpperCAmelCase )
_a = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers
_a = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(__UpperCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
) )
# TODO: test this.
_a = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(__UpperCAmelCase , __UpperCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) )
return common_inputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_a , _a = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_a = seqlen + 2
_a , _a = self.num_layers
_a , _a = self.num_attention_heads
_a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a = common_inputs['''attention_mask'''].dtype
_a = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
_a = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase )
]
return common_inputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_a = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_a = tokenizer.num_special_tokens_to_add(__UpperCAmelCase )
_a = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_a = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
_a = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) )
return common_inputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
elif self.task == "causal-lm":
_a = self._generate_dummy_inputs_for_causal_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
else:
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
return common_inputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
if self.task in ["default", "seq2seq-lm"]:
_a = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
_a = super(__UpperCAmelCase , self )._flatten_past_key_values_(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
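# Added illustrative sketch (not part of the original file; the checkpoint name and the exported
# class name are assumptions): a seq2seq OnnxConfig like the one above is typically consumed via
# transformers.onnx.export, roughly:
#
#   from pathlib import Path
#   from transformers import AutoTokenizer, BartModel
#   from transformers.onnx import export
#
#   tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
#   model = BartModel.from_pretrained("facebook/bart-base")
#   onnx_config = BartOnnxConfig(model.config, task="default")
#   export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("bart.onnx"))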
| 320 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax( depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if not scores:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    height = math.log(len(scores ) , 2 )
    print(f"""Optimal value : {minimax(0 , 0 , True , scores , height )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 16 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp ):
"""simple docstring"""
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
def is_chinese(word: str ):
    """simple docstring"""
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
def get_chinese_word(tokens: List[str] ):
    """simple docstring"""
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set() ):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start , end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            max_match = min(end - start, max_word_len )
            for i in range(max_match, 1, -1 ):
                whole_word = ''''''.join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i ):
                        bert_word[j] = '''##''' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer ):
"""simple docstring"""
_a = []
for i in range(0, len(_lowerCAmelCase ), 1_00 ):
_a = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=['''cws'''] ).cws
_a = [get_chinese_word(_lowerCAmelCase ) for r in res]
ltp_res.extend(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
_a = []
for i in range(0, len(_lowerCAmelCase ), 1_00 ):
_a = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=_lowerCAmelCase, truncation=_lowerCAmelCase, max_length=5_12 )
bert_res.extend(res['''input_ids'''] )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
_a = []
for input_ids, chinese_word in zip(_lowerCAmelCase, _lowerCAmelCase ):
_a = []
for id in input_ids:
_a = bert_tokenizer._convert_id_to_token(_lowerCAmelCase )
input_tokens.append(_lowerCAmelCase )
_a = add_sub_symbol(_lowerCAmelCase, _lowerCAmelCase )
_a = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_lowerCAmelCase ):
if token[:2] == "##":
_a = token[2:]
# save chinese tokens' pos
if len(_lowerCAmelCase ) == 1 and _is_chinese_char(ord(_lowerCAmelCase ) ):
ref_id.append(_lowerCAmelCase )
ref_ids.append(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
return ref_ids
def main(args ):
"""simple docstring"""
with open(args.file_name, '''r''', encoding='''utf-8''' ) as f:
_a = f.readlines()
_a = [line.strip() for line in data if len(_lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_a = LTP(args.ltp ) # faster in GPU device
_a = BertTokenizer.from_pretrained(args.bert )
_a = prepare_ref(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
with open(args.save_path, '''w''', encoding='''utf-8''' ) as f:
_a = [json.dumps(_lowerCAmelCase ) + '''\n''' for ref in ref_ids]
f.writelines(_lowerCAmelCase )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
__snake_case = parser.parse_args()
main(args)
| 320 | 0 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str]) -> Optional[int]:
'''simple docstring'''
if isinstance(UpperCamelCase_, torch.Tensor):
return image
elif isinstance(UpperCamelCase_, PIL.Image.Image):
__lowercase = [image]
if isinstance(image[0], PIL.Image.Image):
__lowercase = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
__lowercase = np.concatenate(UpperCamelCase_, axis=0)
__lowercase = np.array(UpperCamelCase_).astype(np.floataa) / 255.0
__lowercase = image.transpose(0, 3, 1, 2)
__lowercase = 2.0 * image - 1.0
__lowercase = torch.from_numpy(UpperCamelCase_)
elif isinstance(image[0], torch.Tensor):
__lowercase = torch.cat(UpperCamelCase_, dim=0)
return image
def _A ( UpperCamelCase_ : Dict, UpperCamelCase_ : str, UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[Any]=0.9_995) -> int:
'''simple docstring'''
if not isinstance(UpperCamelCase_, np.ndarray):
__lowercase = True
__lowercase = va.device
__lowercase = va.cpu().numpy()
__lowercase = va.cpu().numpy()
__lowercase = np.sum(va * va / (np.linalg.norm(UpperCamelCase_) * np.linalg.norm(UpperCamelCase_)))
if np.abs(UpperCamelCase_) > DOT_THRESHOLD:
__lowercase = (1 - t) * va + t * va
else:
__lowercase = np.arccos(UpperCamelCase_)
__lowercase = np.sin(UpperCamelCase_)
__lowercase = theta_a * t
__lowercase = np.sin(UpperCamelCase_)
__lowercase = np.sin(theta_a - theta_t) / sin_theta_a
__lowercase = sin_theta_t / sin_theta_a
__lowercase = sa * va + sa * va
if inputs_are_torch:
__lowercase = torch.from_numpy(UpperCamelCase_).to(UpperCamelCase_)
return va
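# Added note (not part of the original file): the helper above is spherical linear interpolation
# (slerp) between two embedding/latent vectors; when the inputs are nearly parallel
# (|cos| > DOT_THRESHOLD) it falls back to plain linear interpolation, and it round-trips torch
# tensors through numpy as needed.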
def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Union[str, Any]) -> int:
'''simple docstring'''
__lowercase = F.normalize(UpperCamelCase_, dim=-1)
__lowercase = F.normalize(UpperCamelCase_, dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def _A ( UpperCamelCase_ : Optional[int], UpperCamelCase_ : str) -> Optional[int]:
'''simple docstring'''
for param in model.parameters():
__lowercase = value
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], UpperCAmelCase__ : CLIPFeatureExtractor, UpperCAmelCase__ : Union[str, Any]=None, UpperCAmelCase__ : List[str]=None, UpperCAmelCase__ : Any=None, ):
super().__init__()
self.register_modules(
vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, clip_model=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, coca_model=UpperCAmelCase__, coca_tokenizer=UpperCAmelCase__, coca_transform=UpperCAmelCase__, )
__lowercase = (
feature_extractor.size
if isinstance(feature_extractor.size, UpperCAmelCase__ )
else feature_extractor.size["shortest_edge"]
)
__lowercase = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std )
set_requires_grad(self.text_encoder, UpperCAmelCase__ )
set_requires_grad(self.clip_model, UpperCAmelCase__ )
def _lowercase ( self : Tuple, UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase__ )
def _lowercase ( self : int ):
self.enable_attention_slicing(UpperCAmelCase__ )
def _lowercase ( self : str ):
set_requires_grad(self.vae, UpperCAmelCase__ )
def _lowercase ( self : Any ):
set_requires_grad(self.vae, UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ):
set_requires_grad(self.unet, UpperCAmelCase__ )
def _lowercase ( self : Any ):
set_requires_grad(self.unet, UpperCAmelCase__ )
def _lowercase ( self : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[Any] ):
# get the original timestep using init_timestep
__lowercase = min(int(num_inference_steps * strength ), UpperCAmelCase__ )
__lowercase = max(num_inference_steps - init_timestep, 0 )
__lowercase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowercase ( self : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : int=None ):
if not isinstance(UpperCAmelCase__, torch.Tensor ):
raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(UpperCAmelCase__ )}""" )
__lowercase = image.to(device=UpperCAmelCase__, dtype=UpperCAmelCase__ )
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
__lowercase = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCAmelCase__ )
]
__lowercase = torch.cat(UpperCAmelCase__, dim=0 )
else:
__lowercase = self.vae.encode(UpperCAmelCase__ ).latent_dist.sample(UpperCAmelCase__ )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowercase = 0.18_215 * init_latents
__lowercase = init_latents.repeat_interleave(UpperCAmelCase__, dim=0 )
__lowercase = randn_tensor(init_latents.shape, generator=UpperCAmelCase__, device=UpperCAmelCase__, dtype=UpperCAmelCase__ )
# get latents
__lowercase = self.scheduler.add_noise(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = init_latents
return latents
def _lowercase ( self : Optional[int], UpperCAmelCase__ : Dict ):
__lowercase = self.coca_transform(UpperCAmelCase__ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__lowercase = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype ) )
__lowercase = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>", "" ).rstrip(" .," )
def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Tuple ):
__lowercase = self.feature_extractor.preprocess(UpperCAmelCase__ )
__lowercase = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
__lowercase = self.clip_model.get_image_features(UpperCAmelCase__ )
__lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ )
__lowercase = image_embeddings_clip.repeat_interleave(UpperCAmelCase__, dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _lowercase ( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[int], ):
__lowercase = latents.detach().requires_grad_()
__lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ )
# predict the noise residual
__lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__lowercase = self.scheduler.alphas_cumprod[timestep]
__lowercase = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowercase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__lowercase = torch.sqrt(UpperCAmelCase__ )
__lowercase = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler, UpperCAmelCase__ ):
__lowercase = self.scheduler.sigmas[index]
__lowercase = latents - sigma * noise_pred
else:
raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowercase = 1 / 0.18_215 * sample
__lowercase = self.vae.decode(UpperCAmelCase__ ).sample
__lowercase = (image / 2 + 0.5).clamp(0, 1 )
__lowercase = transforms.Resize(self.feature_extractor_size )(UpperCAmelCase__ )
__lowercase = self.normalize(UpperCAmelCase__ ).to(latents.dtype )
__lowercase = self.clip_model.get_image_features(UpperCAmelCase__ )
__lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ )
__lowercase = spherical_dist_loss(UpperCAmelCase__, UpperCAmelCase__ ).mean() * clip_guidance_scale
__lowercase = -torch.autograd.grad(UpperCAmelCase__, UpperCAmelCase__ )[0]
if isinstance(self.scheduler, UpperCAmelCase__ ):
__lowercase = latents.detach() + grads * (sigma**2)
__lowercase = noise_pred_original
else:
__lowercase = noise_pred_original - torch.sqrt(UpperCAmelCase__ ) * grads
return noise_pred, latents
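    # Added note (not part of the original method): the routine above implements CLIP guidance: it
    # predicts the denoised image from the current latents, embeds it with the CLIP image encoder,
    # measures the spherical distance to the target embedding, and shifts the noise prediction
    # along the gradient of that loss scaled by `clip_guidance_scale`.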
@torch.no_grad()
def __call__( self : str, UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : float = 0.6, UpperCAmelCase__ : Optional[int] = 5_0, UpperCAmelCase__ : Optional[float] = 7.5, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[float] = 1_0_0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : float = 0.8, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, ):
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size:
raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(UpperCAmelCase__ )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(UpperCAmelCase__, torch.Generator ) and batch_size > 1:
__lowercase = [generator] + [None] * (batch_size - 1)
__lowercase = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
__lowercase = [x[0] for x in coca_is_none if x[1]]
__lowercase = ", ".join(UpperCAmelCase__ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(UpperCAmelCase__ ):
raise ValueError(
F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
__lowercase = self.get_image_description(UpperCAmelCase__ )
if style_prompt is None:
if len(UpperCAmelCase__ ):
raise ValueError(
F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
__lowercase = self.get_image_description(UpperCAmelCase__ )
# get prompt text embeddings for content and style
__lowercase = self.tokenizer(
UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", )
__lowercase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__lowercase = self.tokenizer(
UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", )
__lowercase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# duplicate text embeddings for each generation per prompt
__lowercase = text_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 )
# set timesteps
__lowercase = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__lowercase = {}
if accepts_offset:
__lowercase = 1
self.scheduler.set_timesteps(UpperCAmelCase__, **UpperCAmelCase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more efficient to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
__lowercase ,__lowercase = self.get_timesteps(UpperCAmelCase__, UpperCAmelCase__, self.device )
__lowercase = timesteps[:1].repeat(UpperCAmelCase__ )
# Preprocess image
__lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.prepare_latents(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ )
__lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.prepare_latents(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ )
__lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
if clip_guidance_scale > 0:
__lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = slerp(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowercase = content_text_input.input_ids.shape[-1]
__lowercase = self.tokenizer([""], padding="max_length", max_length=UpperCAmelCase__, return_tensors="pt" )
__lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__lowercase = uncond_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowercase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__lowercase = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device="cpu", dtype=UpperCAmelCase__ ).to(
self.device )
else:
__lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device=self.device, dtype=UpperCAmelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__lowercase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowercase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowercase = {}
if accepts_eta:
__lowercase = eta
# check if the scheduler accepts generator
__lowercase = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__lowercase = generator
with self.progress_bar(total=UpperCAmelCase__ ):
for i, t in enumerate(UpperCAmelCase__ ):
# expand the latents if we are doing classifier free guidance
__lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ )
# predict the noise residual
__lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__lowercase ,__lowercase = noise_pred.chunk(2 )
__lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__lowercase = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__lowercase ,__lowercase = self.cond_fn(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, **UpperCAmelCase__ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__lowercase = 1 / 0.18_215 * latents
__lowercase = self.vae.decode(UpperCAmelCase__ ).sample
__lowercase = (image / 2 + 0.5).clamp(0, 1 )
__lowercase = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=UpperCAmelCase__, nsfw_content_detected=UpperCAmelCase__ )
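# The pipeline above relies on two helpers that are not included in this snippet:
# `slerp` (spherical interpolation of the content/style embeddings and latents) and
# `spherical_dist_loss` (the CLIP-guidance loss used in `cond_fn`). A minimal sketch
# of typical implementations follows; the exact signatures and the colinearity
# threshold used by the original pipeline are assumptions here.
import numpy as np
import torch
import torch.nn.functional as F


def spherical_dist_loss(x, y):
    # squared geodesic distance between L2-normalised embeddings
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def slerp(t, v0, v1, dot_threshold=0.9995):
    # spherical linear interpolation; falls back to a plain lerp when the two
    # vectors are nearly colinear (dot product above the threshold)
    if isinstance(v0, torch.Tensor):
        device = v0.device
        v0, v1 = v0.cpu().numpy(), v1.cpu().numpy()
    else:
        device = None
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > dot_threshold:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        theta_t = theta_0 * t
        s0 = np.sin(theta_0 - theta_t) / np.sin(theta_0)
        s1 = np.sin(theta_t) / np.sin(theta_0)
        v2 = s0 * v0 + s1 * v1
    return torch.from_numpy(v2).to(device) if device is not None else v2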
| 17 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[Any] = 'gptj'
A_ : Optional[int] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , __UpperCAmelCase=50400 , __UpperCAmelCase=2048 , __UpperCAmelCase=4096 , __UpperCAmelCase=28 , __UpperCAmelCase=16 , __UpperCAmelCase=64 , __UpperCAmelCase=None , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Union[str, Any]:
_a = vocab_size
_a = n_positions
_a = n_embd
_a = n_layer
_a = n_head
_a = n_inner
_a = rotary_dim
_a = activation_function
_a = resid_pdrop
_a = embd_pdrop
_a = attn_pdrop
_a = layer_norm_epsilon
_a = initializer_range
_a = use_cache
_a = bos_token_id
_a = eos_token_id
super().__init__(
bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase )
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = "default" , __UpperCAmelCase = None , __UpperCAmelCase = False , ) -> Optional[Any]:
super().__init__(__UpperCAmelCase , task=__UpperCAmelCase , patching_specs=__UpperCAmelCase , use_past=__UpperCAmelCase )
if not getattr(self._config , '''pad_token_id''' , __UpperCAmelCase ):
# TODO: how to do that better?
_a = 0
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
_a = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' )
_a = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
_a = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def _UpperCAmelCase ( self ) -> int:
return self._config.n_layer
@property
def _UpperCAmelCase ( self ) -> int:
return self._config.n_head
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
_a = super(__UpperCAmelCase , self ).generate_dummy_inputs(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
# We need to order the inputs in the way they appear in the forward()
_a = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_a , _a = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_a = seqlen + 2
_a = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_a = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(self.num_layers )
]
_a = common_inputs['''attention_mask''']
if self.use_past:
_a = ordered_inputs['''attention_mask'''].dtype
_a = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def _UpperCAmelCase ( self ) -> int:
return 13
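# A hedged usage sketch for the ONNX config above. The class names in this snippet
# are placeholders; in transformers the corresponding pair is GPTJConfig /
# GPTJOnnxConfig (assumed here), and the tokenizer checkpoint is only illustrative.
from transformers import AutoTokenizer, GPTJConfig, TensorType
from transformers.models.gptj.configuration_gptj import GPTJOnnxConfig

config = GPTJConfig()  # config object only, no weights are downloaded
onnx_config = GPTJOnnxConfig(config, task="default", use_past=True)
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
dummy_inputs = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
)
# expect input_ids, past_key_values and an attention_mask covering past + current
# tokens, matching the `inputs` property defined above
print(sorted(dummy_inputs.keys()))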
| 320 | 0 |
import os
def _snake_case ( ):
"""simple docstring"""
with open(os.path.dirname(lowerCAmelCase ) + "/p022_names.txt" ) as file:
SCREAMING_SNAKE_CASE_ : List[str] = str(file.readlines()[0] )
SCREAMING_SNAKE_CASE_ : Any = names.replace("\"" , "" ).split("," )
names.sort()
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
for i, name in enumerate(lowerCAmelCase ):
for letter in name:
name_score += ord(lowerCAmelCase ) - 6_4
total_score += (i + 1) * name_score
SCREAMING_SNAKE_CASE_ : Tuple = 0
return total_score
if __name__ == "__main__":
print(solution())
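# Because the assignment targets in this dump are mangled, the routine above is
# easier to follow in its conventional form. A minimal equivalent sketch, assuming
# the placeholders stand for `names`, `name_score` and `total_score` as the
# remaining references suggest:
import os


def total_name_score(filename: str = "p022_names.txt") -> int:
    # read the single line of comma-separated, quoted names, sort them, then
    # sum (1-based position) * (sum of letter values, A=1 ... Z=26)
    with open(os.path.join(os.path.dirname(__file__), filename)) as file:
        names = file.readlines()[0].replace('"', "").split(",")
    names.sort()
    return sum(
        position * sum(ord(letter) - 64 for letter in name)
        for position, name in enumerate(names, start=1)
    )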
| 18 |
"""simple docstring"""
import os
import sys
import unittest
__snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__snake_case = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
__snake_case = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> str:
_a = get_test_to_tester_mapping(__UpperCAmelCase )
_a = get_test_to_tester_mapping(__UpperCAmelCase )
_a = {'''BertModelTest''': '''BertModelTester'''}
_a = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = get_model_to_test_mapping(__UpperCAmelCase )
_a = get_model_to_test_mapping(__UpperCAmelCase )
_a = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
_a = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = get_model_to_tester_mapping(__UpperCAmelCase )
_a = get_model_to_tester_mapping(__UpperCAmelCase )
_a = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
_a = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
| 320 | 0 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = 10
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = [1, 2, 3, 4]
lowerCamelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
lowerCamelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
lowerCamelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
lowerCamelCase_ , lowerCamelCase_ = process_story(lowercase )
self.assertEqual(lowercase , [] )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = ""
lowerCamelCase_ , lowerCamelCase_ = process_story(lowercase )
self.assertEqual(lowercase , [] )
self.assertEqual(lowercase , [] )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
lowerCamelCase_ , lowerCamelCase_ = process_story(lowercase )
lowerCamelCase_ = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(lowercase , lowercase )
lowerCamelCase_ = ["It was the best of times."]
self.assertEqual(lowercase , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = torch.tensor([1, 2, 3, 4] )
lowerCamelCase_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(lowercase , 0 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
lowerCamelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase , 23 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
lowerCamelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase , 1 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = 101
lowerCamelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
lowerCamelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
lowerCamelCase_ = compute_token_type_ids(lowercase , lowercase )
np.testing.assert_array_equal(lowercase , lowercase )
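# The helpers exercised above (`truncate_or_pad`, `build_mask`) are not shown in
# this snippet. Below is one way they could be implemented so the assertions above
# hold -- a hedged sketch, not necessarily the original code.
import torch


def truncate_or_pad(sequence, block_size, pad_token_id):
    # clip the sequence to `block_size`, or right-pad it with `pad_token_id`
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))


def build_mask(sequence, pad_token_id):
    # 1 for real tokens, 0 at padding positions (see the tensor tests above)
    mask = torch.ones_like(sequence)
    mask[sequence == pad_token_id] = 0
    return mask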
| 19 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __lowerCamelCase :
'''simple docstring'''
@staticmethod
def _UpperCAmelCase ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
pass
def A_ ( _lowerCAmelCase : Image ):
"""simple docstring"""
_a = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def A_ ( _lowerCAmelCase : Image ):
"""simple docstring"""
_a = np.array(_lowerCAmelCase )
_a = npimg.shape
return {"hash": hashimage(_lowerCAmelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
A_ : Any = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
A_ : str = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
_a = MaskGenerationPipeline(model=__UpperCAmelCase , image_processor=__UpperCAmelCase )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int:
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def _UpperCAmelCase ( self ) -> List[str]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> int:
_a = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
_a = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 )
# Shortening by hashing
_a = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8871}
] , )
# fmt: on
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Any:
_a = '''facebook/sam-vit-huge'''
_a = pipeline('''mask-generation''' , model=__UpperCAmelCase )
_a = image_segmenter(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
_a = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053},
] , )
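# A hedged usage sketch mirroring the slow tests above: `outputs["masks"]` and
# `outputs["scores"]` are parallel lists, which the tests shorten by hashing each
# mask. Checkpoint and call arguments are taken directly from the tests.
from transformers import pipeline

generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
outputs = generator(
    "http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256
)
for mask, score in zip(outputs["masks"], outputs["scores"]):
    print(type(mask), float(score))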
| 320 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
_a : Union[str, Any]= IFInpaintingSuperResolutionPipeline
_a : Tuple= TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
_a : str= TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
_a : Tuple= PipelineTesterMixin.required_optional_params - {"latents"}
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return self._get_superresolution_dummy_components()
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=0 ):
'''simple docstring'''
if str(snake_case ).startswith("""mps""" ):
lowercase : int = torch.manual_seed(snake_case )
else:
lowercase : List[str] = torch.Generator(device=snake_case ).manual_seed(snake_case )
lowercase : str = floats_tensor((1, 3, 16, 16) ,rng=random.Random(snake_case ) ).to(snake_case )
lowercase : Union[str, Any] = floats_tensor((1, 3, 32, 32) ,rng=random.Random(snake_case ) ).to(snake_case )
lowercase : str = floats_tensor((1, 3, 32, 32) ,rng=random.Random(snake_case ) ).to(snake_case )
lowercase : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" ,reason="""float16 requires CUDA""" )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self._test_save_load_local()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 ,)
| 20 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=9 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=8 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.002 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]:
_a = parent
_a = batch_size
_a = encoder_seq_length
_a = decoder_seq_length
# For common tests
_a = self.decoder_seq_length
_a = is_training
_a = use_attention_mask
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = d_ff
_a = relative_attention_num_buckets
_a = dropout_rate
_a = initializer_factor
_a = eos_token_id
_a = pad_token_id
_a = decoder_start_token_id
_a = None
_a = decoder_layers
def _UpperCAmelCase ( self ) -> Dict:
return TaConfig.from_pretrained('''google/umt5-base''' )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]:
if attention_mask is None:
_a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCAmelCase )
if decoder_head_mask is None:
_a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase )
if cross_attn_head_mask is None:
_a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _UpperCAmelCase ( self ) -> Tuple:
_a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
_a = input_ids.clamp(self.pad_token_id + 1 )
_a = decoder_input_ids.clamp(self.pad_token_id + 1 )
_a = self.get_config()
_a = config.num_attention_heads
_a = self.prepare_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, input_dict
def _UpperCAmelCase ( self ) -> int:
_a , _a = self.prepare_config_and_inputs()
return config, inputs_dict
def _UpperCAmelCase ( self ) -> Tuple:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _UpperCAmelCase ( self ) -> List[str]:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Dict:
_a = UMTaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(
input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , )
_a = model(input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase )
_a = result.last_hidden_state
_a = result.past_key_values
_a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__UpperCAmelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[Any]:
_a = UMTaModel(config=__UpperCAmelCase ).get_decoder().to(__UpperCAmelCase ).eval()
# first forward pass
_a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
_a = model(__UpperCAmelCase )
_a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 )
_a , _a = outputs.to_tuple()
# create a hypothetical next token and extend next_input_ids with it
_a = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append the new token to input_ids
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = model(__UpperCAmelCase )['''last_hidden_state''']
_a = model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )['''last_hidden_state''']
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -1, random_slice_idx].detach()
_a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , ) -> Union[str, Any]:
_a = UMTaModel(config=__UpperCAmelCase ).to(__UpperCAmelCase ).half().eval()
_a = model(**__UpperCAmelCase )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(__UpperCAmelCase ).any().item() )
@require_torch
class __lowerCamelCase ( a__ , a__ , a__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
A_ : Optional[Any] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
A_ : int = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
A_ : str = True
A_ : List[str] = False
A_ : List[Any] = False
A_ : str = True
A_ : List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
A_ : Optional[Any] = [0.8, 0.9]
def _UpperCAmelCase ( self ) -> Tuple:
_a = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def _UpperCAmelCase ( self ) -> int:
_a = self.model_tester.prepare_config_and_inputs()
_a = UMTaModel(config_and_inputs[0] ).to(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=__UpperCAmelCase , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
_a = self.model_tester.prepare_config_and_inputs()
_a = config_and_inputs[0]
_a = UMTaForConditionalGeneration(__UpperCAmelCase ).eval()
model.to(__UpperCAmelCase )
_a = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=__UpperCAmelCase ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ),
}
for attn_name, (name, mask) in zip(__UpperCAmelCase , head_masking.items() ):
_a = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_a = torch.ones(
config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase )
_a = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=__UpperCAmelCase , return_dict_in_generate=__UpperCAmelCase , **__UpperCAmelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def _UpperCAmelCase ( self ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=__UpperCAmelCase ).to(__UpperCAmelCase )
_a = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=__UpperCAmelCase , legacy=__UpperCAmelCase )
_a = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
_a = tokenizer(__UpperCAmelCase , return_tensors='''pt''' , padding=__UpperCAmelCase ).input_ids
# fmt: off
_a = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__UpperCAmelCase , __UpperCAmelCase )
_a = model.generate(input_ids.to(__UpperCAmelCase ) )
_a = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
_a = tokenizer.batch_decode(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
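# A hedged sketch of the integration pattern in the slow test above, using the real
# class names that the mangled `UMTa*` identifiers stand for (assumed:
# UMT5ForConditionalGeneration with the google/umt5-small checkpoint).
import torch
from transformers import AutoTokenizer, UMT5ForConditionalGeneration

model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False)
batch = tokenizer(
    ["A <extra_id_0> walks into a bar and orders a <extra_id_1>."],
    return_tensors="pt",
    padding=True,
)
with torch.no_grad():
    generated = model.generate(batch.input_ids, max_length=16)
print(tokenizer.batch_decode(generated))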
| 320 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
SCREAMING_SNAKE_CASE : Optional[Any] = 16
SCREAMING_SNAKE_CASE : Union[str, Any] = 32
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ = 16 , lowerCamelCase_ = "bert-base-cased" ) -> Optional[Any]:
_lowercase : str = AutoTokenizer.from_pretrained(lowerCamelCase_ )
_lowercase : Dict = load_dataset('glue' , 'mrpc' )
def tokenize_function(lowerCamelCase_ ):
# max_length=None => use the model max length (it's actually the default)
_lowercase : List[Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowercase : List[str] = datasets.map(
lowerCamelCase_ , batched=lowerCamelCase_ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=lowerCamelCase_ )
# We also rename the 'label' column to 'labels', which is the column name the
# models in the transformers library expect
_lowercase : int = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(lowerCamelCase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCamelCase_ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(lowerCamelCase_ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
_lowercase : Tuple = DataLoader(
tokenized_datasets['train'] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ )
_lowercase : str = DataLoader(
tokenized_datasets['validation'] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ )
return train_dataloader, eval_dataloader
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
# Initialize accelerator
_lowercase : str = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowercase : Optional[int] = config['lr']
_lowercase : Tuple = int(config['num_epochs'] )
_lowercase : int = int(config['seed'] )
_lowercase : Tuple = int(config['batch_size'] )
_lowercase : Any = args.model_name_or_path
set_seed(lowerCamelCase_ )
_lowercase , _lowercase : str = get_dataloaders(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowercase : List[Any] = AutoModelForSequenceClassification.from_pretrained(lowerCamelCase_ , return_dict=lowerCamelCase_ )
# Instantiate optimizer
_lowercase : Union[str, Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_lowercase : int = optimizer_cls(params=model.parameters() , lr=lowerCamelCase_ )
if accelerator.state.deepspeed_plugin is not None:
_lowercase : Any = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
_lowercase : Tuple = 1
_lowercase : Union[str, Any] = (len(lowerCamelCase_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_lowercase : List[str] = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase_ , num_warmup_steps=0 , num_training_steps=lowerCamelCase_ , )
else:
_lowercase : Dict = DummyScheduler(lowerCamelCase_ , total_num_steps=lowerCamelCase_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = accelerator.prepare(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# We need to keep track of how many total steps we have iterated over
_lowercase : Any = 0
# We also need to keep track of the stating epoch so files are named properly
_lowercase : Union[str, Any] = 0
# Now we train the model
_lowercase : Tuple = evaluate.load('glue' , 'mrpc' )
_lowercase : Any = 0
_lowercase : Optional[Any] = {}
for epoch in range(lowerCamelCase_ , lowerCamelCase_ ):
model.train()
for step, batch in enumerate(lowerCamelCase_ ):
_lowercase : List[str] = model(**lowerCamelCase_ )
_lowercase : Tuple = outputs.loss
_lowercase : Tuple = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
_lowercase : int = 0
for step, batch in enumerate(lowerCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowercase : Dict = model(**lowerCamelCase_ )
_lowercase : List[str] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once than multiple times
_lowercase , _lowercase : Dict = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowerCamelCase_ ) - 1:
_lowercase : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_lowercase : Optional[Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowerCamelCase_ , references=lowerCamelCase_ , )
_lowercase : Optional[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , lowerCamelCase_ )
_lowercase : List[str] = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
_lowercase : Union[str, Any] = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_( ) -> Any:
_lowercase : Optional[Any] = argparse.ArgumentParser(description='Simple example of a training script that checks eval performance (MRPC accuracy) against a lower bound.' )
parser.add_argument(
'--model_name_or_path' , type=lowerCamelCase_ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=lowerCamelCase_ , )
parser.add_argument(
'--output_dir' , type=lowerCamelCase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--performance_lower_bound' , type=lowerCamelCase_ , default=lowerCamelCase_ , help='Optional lower bound for the performance metric. If set, training will raise an error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=lowerCamelCase_ , default=3 , help='Number of train epochs.' , )
_lowercase : List[str] = parser.parse_args()
_lowercase : List[str] = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
main()
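# The DummyOptim / DummyScheduler branches above trigger when the DeepSpeed config
# itself owns the optimizer and scheduler. A hedged sketch of such a config, written
# as a Python dict (field names follow the DeepSpeed JSON schema; values are
# illustrative only):
ds_config = {
    "train_micro_batch_size_per_gpu": 16,
    "gradient_accumulation_steps": 1,
    "zero_optimization": {"stage": 2},
    "optimizer": {"type": "AdamW", "params": {"lr": 2e-5}},
    "scheduler": {"type": "WarmupLR", "params": {"warmup_num_steps": 0}},
}
# With "optimizer"/"scheduler" present in the config, the script above selects
# DummyOptim and DummyScheduler so that DeepSpeed, not the training loop, builds
# the real optimizer and learning-rate schedule.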
| 21 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class __lowerCamelCase :
'''simple docstring'''
def __init__( self ) -> Tuple:
_a = {}
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> int:
if self.graph.get(__UpperCAmelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_a = [[w, v]]
if not self.graph.get(__UpperCAmelCase ):
_a = []
def _UpperCAmelCase ( self ) -> int:
return list(self.graph )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]:
if s == d:
return []
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
# check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple:
if c == -1:
_a = floor(random() * 10000 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_a = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[str]:
_a = deque()
_a = []
if s == -2:
_a = list(self.graph )[0]
d.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
while d:
_a = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Tuple:
_a = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict:
return len(self.graph[u] )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple:
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
_a = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
# check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return sorted_nodes
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Any:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]:
_a = time()
self.dfs(__UpperCAmelCase , __UpperCAmelCase )
_a = time()
return end - begin
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Optional[Any]:
_a = time()
self.bfs(__UpperCAmelCase )
_a = time()
return end - begin
class __lowerCamelCase :
'''simple docstring'''
def __init__( self ) -> Optional[int]:
_a = {}
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> Dict:
# check if u already exists
if self.graph.get(__UpperCAmelCase ):
# if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_a = [[w, v]]
# add the other way
if self.graph.get(__UpperCAmelCase ):
# if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
_a = [[w, u]]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCAmelCase )
# the other way round
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Dict:
if s == d:
return []
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
# check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple:
if c == -1:
_a = floor(random() * 10000 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_a = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[Any]:
_a = deque()
_a = []
if s == -2:
_a = list(self.graph )[0]
d.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
while d:
_a = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict:
return len(self.graph[u] )
def _UpperCAmelCase ( self ) -> int:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return list(self.graph )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Tuple:
_a = time()
self.dfs(__UpperCAmelCase , __UpperCAmelCase )
_a = time()
return end - begin
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple:
_a = time()
self.bfs(__UpperCAmelCase )
_a = time()
return end - begin
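# Usage sketch (illustrative only): the class above defines all of its methods under the same
# placeholder identifier, so the names below follow the original undirected-graph
# implementation (add_pair, bfs) and are assumptions rather than identifiers defined here.
# >>> g = Graph()
# >>> g.add_pair(0, 1); g.add_pair(1, 2); g.add_pair(2, 0)
# >>> g.bfs(0) # breadth-first order starting from node 0
# [0, 1, 2]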
| 320 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__SCREAMING_SNAKE_CASE :Union[str, Any] = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE :Union[str, Any] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE :Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
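# Note: _LazyModule defers the imports registered above until one of the exported names is
# first accessed, which keeps `import transformers` cheap. Rough illustration (module path
# assumed from the usual transformers layout, not defined in this file):
# >>> from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseConfig # real import happens here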
| 22 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : Dict = 'unispeech'
def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase=320 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=100 , __UpperCAmelCase=256 , __UpperCAmelCase=256 , __UpperCAmelCase=0.1 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=80 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=0.5 , **__UpperCAmelCase , ) -> Union[str, Any]:
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
_a = hidden_size
_a = feat_extract_norm
_a = feat_extract_activation
_a = list(__UpperCAmelCase )
_a = list(__UpperCAmelCase )
_a = list(__UpperCAmelCase )
_a = conv_bias
_a = num_conv_pos_embeddings
_a = num_conv_pos_embedding_groups
_a = len(self.conv_dim )
_a = num_hidden_layers
_a = intermediate_size
_a = hidden_act
_a = num_attention_heads
_a = hidden_dropout
_a = attention_dropout
_a = activation_dropout
_a = feat_proj_dropout
_a = final_dropout
_a = layerdrop
_a = layer_norm_eps
_a = initializer_range
_a = num_ctc_classes
_a = vocab_size
_a = do_stable_layer_norm
_a = use_weighted_layer_sum
_a = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a = apply_spec_augment
_a = mask_time_prob
_a = mask_time_length
_a = mask_time_min_masks
_a = mask_feature_prob
_a = mask_feature_length
_a = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_a = num_codevectors_per_group
_a = num_codevector_groups
_a = contrastive_logits_temperature
_a = feat_quantizer_dropout
_a = num_negatives
_a = codevector_dim
_a = proj_codevector_dim
_a = diversity_loss_weight
# ctc loss
_a = ctc_loss_reduction
_a = ctc_zero_infinity
# pretraining loss
_a = replace_prob
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
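# The property above multiplies the convolutional strides together, i.e. the overall
# downsampling factor between raw audio samples and encoder frames. With the default
# conv_stride of (5, 2, 2, 2, 2, 2, 2) this is 5 * 2**6 = 320, so one output frame
# corresponds to 320 input samples (20 ms of audio at 16 kHz).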
| 320 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , __snake_case : int , __snake_case : Dict=7 , __snake_case : List[str]=3 , __snake_case : Any=18 , __snake_case : Optional[int]=30 , __snake_case : List[Any]=400 , __snake_case : Any=True , __snake_case : Union[str, Any]=None , __snake_case : List[str]=True , __snake_case : Dict=None , __snake_case : Dict=True , __snake_case : Any=[0.5, 0.5, 0.5] , __snake_case : Tuple=[0.5, 0.5, 0.5] , ) -> Optional[int]:
UpperCAmelCase : List[str] = size if size is not None else {'''shortest_edge''': 18}
UpperCAmelCase : Dict = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : Union[str, Any] = batch_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : Union[str, Any] = image_size
UpperCAmelCase : Optional[Any] = min_resolution
UpperCAmelCase : List[str] = max_resolution
UpperCAmelCase : List[Any] = do_resize
UpperCAmelCase : List[Any] = size
UpperCAmelCase : Dict = do_center_crop
UpperCAmelCase : Dict = crop_size
UpperCAmelCase : Union[str, Any] = do_normalize
UpperCAmelCase : int = image_mean
UpperCAmelCase : Union[str, Any] = image_std
def A ( self : List[str] ) -> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE( A__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = LevitImageProcessor if is_vision_available() else None
def A ( self : Tuple ) -> Any:
UpperCAmelCase : List[Any] = LevitImageProcessingTester(self )
@property
def A ( self : int ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , '''image_mean''' ) )
self.assertTrue(hasattr(__snake_case , '''image_std''' ) )
self.assertTrue(hasattr(__snake_case , '''do_normalize''' ) )
self.assertTrue(hasattr(__snake_case , '''do_resize''' ) )
self.assertTrue(hasattr(__snake_case , '''do_center_crop''' ) )
self.assertTrue(hasattr(__snake_case , '''size''' ) )
def A ( self : int ) -> Any:
UpperCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
UpperCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def A ( self : Tuple ) -> Dict:
pass
def A ( self : Optional[int] ) -> Dict:
# Initialize image_processing
UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
UpperCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase : Any = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Optional[Any] ) -> Optional[int]:
# Initialize image_processing
UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
UpperCAmelCase : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase : Tuple = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Optional[int] ) -> Dict:
# Initialize image_processing
UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
UpperCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase : Dict = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 23 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__snake_case = None
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
__snake_case = {
'''google/rembert''': 256,
}
__snake_case = '''▁'''
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : Optional[Any] = VOCAB_FILES_NAMES
A_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : List[Any] = RemBertTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , **__UpperCAmelCase , ) -> List[Any]:
# Mask token behave like a normal word, i.e. include the space before it
_a = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , )
_a = do_lower_case
_a = remove_space
_a = keep_accents
_a = vocab_file
_a = False if not self.vocab_file else True
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(__UpperCAmelCase ) )
return
_a = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
return (out_vocab_file,)
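# For reference, the special-token helpers above produce the standard layouts (read directly
# from the code):
# single sequence: [CLS] A [SEP] with token_type_ids 0 ... 0
# pair of sequences: [CLS] A [SEP] B [SEP] with token_type_ids 0 ... 0 followed by 1 ... 1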
| 320 | 0 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCamelCase__ ( ) -> Any:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__snake_case = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowerCamelCase__ ( ) -> Any:
assert _test_patching.open is open
__snake_case = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , '''open''' , snake_case_ ):
assert _test_patching.open is mock
# check that everything is back to normal when the patch is over
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> List[str]:
# pandas.read_csv is not present in _test_patching
__snake_case = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching , '''pandas.read_csv''' , snake_case_ ):
pass
def lowerCamelCase__ ( ) -> Union[str, Any]:
# builtins should always be mocked even if they're not in the globals,
# in case they get loaded at some point
__snake_case = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , '''len''' , snake_case_ ) is None
with patch_submodule(_test_patching , '''len''' , snake_case_ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def lowerCamelCase__ ( ) -> Union[str, Any]:
__snake_case = '''__test_patch_submodule_start_and_stop_mock__'''
__snake_case = patch_submodule(_test_patching , '''open''' , snake_case_ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> Optional[int]:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__snake_case = '''__test_patch_submodule_successive_join__'''
__snake_case = '''__test_patch_submodule_successive_dirname__'''
__snake_case = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ):
with patch_submodule(_test_patching , '''os.rename''' , snake_case_ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , snake_case_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , '''os.rename''' , snake_case_ ):
with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , snake_case_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowerCamelCase__ ( ) -> Tuple:
__snake_case = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , snake_case_ ):
pass
with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , snake_case_ ):
pass
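# Minimal usage sketch of patch_submodule itself (my_module is a hypothetical target; this
# mirrors the behaviour exercised above, where both `module.os.path.join` and a
# `from os.path import join` alias are patched for the duration of the context):
# >>> with patch_submodule(my_module, "os.path.join", lambda *parts: "patched"):
# ...     my_module.join("a", "b")
# 'patched'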
| 24 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 320 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : int = 0
__UpperCamelCase : bool = False
__UpperCamelCase : float = 3.0
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"""a""": 2} )
self.assertDictEqual(MockClass(a=2 , b=SCREAMING_SNAKE_CASE__ ).to_kwargs() , {"""a""": 2, """b""": True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"""a""": 2, """c""": 2.25} )
@require_cuda
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = GradScalerKwargs(init_scale=10_24 , growth_factor=2 )
AcceleratorState._reset_state()
SCREAMING_SNAKE_CASE__ : Tuple = Accelerator(mixed_precision="""fp16""" , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
SCREAMING_SNAKE_CASE__ : str = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 20_00 )
self.assertEqual(scaler._enabled , SCREAMING_SNAKE_CASE__ )
@require_multi_gpu
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=os.environ.copy() )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[Any] = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
UpperCAmelCase__ : Union[str, Any] = Accelerator(kwargs_handlers=[ddp_scaler])
UpperCAmelCase__ : Any = torch.nn.Linear(1_0_0, 2_0_0)
UpperCAmelCase__ : Optional[Any] = accelerator.prepare(model)
# Check the values changed in kwargs
UpperCAmelCase__ : Tuple = ''
UpperCAmelCase__ : List[str] = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 25 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowerCamelCase ( a__ ):
'''simple docstring'''
@require_torch
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it is too late to change it from inside pytest - so we change it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_a = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(__UpperCAmelCase )
BertModel.from_pretrained(__UpperCAmelCase )
BertTokenizer.from_pretrained(__UpperCAmelCase )
pipeline(task='''fill-mask''' , model=__UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_a = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_a = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(__UpperCAmelCase )
BertModel.from_pretrained(__UpperCAmelCase )
BertTokenizer.from_pretrained(__UpperCAmelCase )
pipeline(task='''fill-mask''' , model=__UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it is too late to change it from inside pytest - so we change it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
_a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
_a = '''
from transformers import pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
_a = self.get_env()
_a = '''1'''
_a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
_a = '''
from transformers import AutoModel
'''
_a = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
| 320 | 0 |
def solution():
return [
a * b * (1000 - a - b)
for a in range(1,999 )
for b in range(a,999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
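# Worked check: the only Pythagorean triplet with a + b + c = 1000 is (200, 375, 425),
# since 200**2 + 375**2 = 40_000 + 140_625 = 180_625 = 425**2, so the comprehension above
# should yield 200 * 375 * 425 = 31_875_000.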
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : str = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Any = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Dict = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Tuple = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Any = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(cls , ['''flax'''] )
| 320 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__lowercase : Optional[int] = logging.get_logger(__name__)
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , *__a , **__a ):
'''simple docstring'''
warnings.warn(
'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DeiTImageProcessor instead.' , __a , )
super().__init__(*__a , **__a )
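# Migration sketch (the checkpoint name is only an example): call sites can usually switch to
# the replacement class directly, since DeiTImageProcessor accepts the same arguments.
# >>> from transformers import DeiTImageProcessor
# >>> image_processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")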
| 27 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__snake_case = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__snake_case = '''\
WIKI_SPLIT is the combination of three metrics: SARI, exact match (EXACT) and SACREBLEU.
It can be used to evaluate the quality of machine-generated texts.
'''
__snake_case = '''
Calculates the SARI score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the SacreBLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
def remove_articles(_lowerCAmelCase : Optional[int] ):
_a = re.compile(R'''\b(a|an|the)\b''', re.UNICODE )
return re.sub(_lowerCAmelCase, ''' ''', _lowerCAmelCase )
def white_space_fix(_lowerCAmelCase : Tuple ):
return " ".join(text.split() )
def remove_punc(_lowerCAmelCase : Tuple ):
_a = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowerCAmelCase : List[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) )
def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) )
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Any ):
"""simple docstring"""
_a = [any(compute_exact(_lowerCAmelCase, _lowerCAmelCase ) for ref in refs ) for pred, refs in zip(_lowerCAmelCase, _lowerCAmelCase )]
return (sum(_lowerCAmelCase ) / len(_lowerCAmelCase )) * 1_00
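# Worked example for the helpers above (names taken from the original metric script,
# normalize_answer / compute_exact / compute_em, since the defs above all share a placeholder name):
# >>> normalize_answer("A Big, Dog!") # lowercase, drop punctuation and articles
# 'big dog'
# >>> compute_em(["About 95 you now get in ."], [["About 95 species are currently known ."]])
# 0.0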
def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : List[Any], _lowerCAmelCase : str, _lowerCAmelCase : str ):
"""simple docstring"""
_a = [rgram for rgrams in rgramslist for rgram in rgrams]
_a = Counter(_lowerCAmelCase )
_a = Counter(_lowerCAmelCase )
_a = Counter()
for sgram, scount in sgramcounter.items():
_a = scount * numref
_a = Counter(_lowerCAmelCase )
_a = Counter()
for cgram, ccount in cgramcounter.items():
_a = ccount * numref
# KEEP
_a = sgramcounter_rep & cgramcounter_rep
_a = keepgramcounter_rep & rgramcounter
_a = sgramcounter_rep & rgramcounter
_a = 0
_a = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(_lowerCAmelCase ) > 0:
_a = keeptmpscorea / len(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_a = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_a = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_a = sgramcounter_rep - cgramcounter_rep
_a = delgramcounter_rep - rgramcounter
_a = sgramcounter_rep - rgramcounter
_a = 0
_a = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
if len(_lowerCAmelCase ) > 0:
_a = deltmpscorea / len(_lowerCAmelCase )
# ADDITION
_a = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
_a = set(_lowerCAmelCase ) & set(_lowerCAmelCase )
_a = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
_a = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(_lowerCAmelCase ) > 0:
_a = addtmpscore / len(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
_a = addtmpscore / len(_lowerCAmelCase )
_a = 0
if addscore_precision > 0 or addscore_recall > 0:
_a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Dict, _lowerCAmelCase : Any ):
"""simple docstring"""
_a = len(_lowerCAmelCase )
_a = ssent.split(''' ''' )
_a = csent.split(''' ''' )
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
for rsent in rsents:
_a = rsent.split(''' ''' )
_a = []
_a = []
_a = []
ragramslist.append(_lowerCAmelCase )
for i in range(0, len(_lowerCAmelCase ) - 1 ):
if i < len(_lowerCAmelCase ) - 1:
_a = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 2:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 3:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(_lowerCAmelCase )
ragramslist.append(_lowerCAmelCase )
ragramslist.append(_lowerCAmelCase )
ragramslist.append(_lowerCAmelCase )
for i in range(0, len(_lowerCAmelCase ) - 1 ):
if i < len(_lowerCAmelCase ) - 1:
_a = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 2:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 3:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(_lowerCAmelCase )
for i in range(0, len(_lowerCAmelCase ) - 1 ):
if i < len(_lowerCAmelCase ) - 1:
_a = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 2:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 3:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(_lowerCAmelCase )
((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
_a = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_a = sum([delascore, delascore, delascore, delascore] ) / 4
_a = sum([addascore, addascore, addascore, addascore] ) / 4
_a = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : bool = True, _lowerCAmelCase : str = "13a", _lowerCAmelCase : bool = True ):
"""simple docstring"""
if lowercase:
_a = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_a = sacrebleu.metrics.bleu._get_tokenizer(_lowerCAmelCase )()(_lowerCAmelCase )
else:
_a = sacrebleu.TOKENIZERS[tokenizer]()(_lowerCAmelCase )
elif tokenizer == "moses":
_a = sacremoses.MosesTokenizer().tokenize(_lowerCAmelCase, return_str=_lowerCAmelCase, escape=_lowerCAmelCase )
elif tokenizer == "penn":
_a = sacremoses.MosesTokenizer().penn_tokenize(_lowerCAmelCase, return_str=_lowerCAmelCase )
else:
_a = sentence
if not return_str:
_a = normalized_sent.split()
return normalized_sent
def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
if not (len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == len(_lowerCAmelCase )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
_a = 0
for src, pred, refs in zip(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ):
sari_score += SARIsent(normalize(_lowerCAmelCase ), normalize(_lowerCAmelCase ), [normalize(_lowerCAmelCase ) for sent in refs] )
_a = sari_score / len(_lowerCAmelCase )
return 1_00 * sari_score
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Tuple, _lowerCAmelCase : Any="exp", _lowerCAmelCase : Tuple=None, _lowerCAmelCase : Union[str, Any]=False, _lowerCAmelCase : Optional[Any]=False, _lowerCAmelCase : List[str]=False, ):
"""simple docstring"""
_a = len(references[0] )
if any(len(_lowerCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_a = [[refs[i] for refs in references] for i in range(_lowerCAmelCase )]
_a = sacrebleu.corpus_bleu(
_lowerCAmelCase, _lowerCAmelCase, smooth_method=_lowerCAmelCase, smooth_value=_lowerCAmelCase, force=_lowerCAmelCase, lowercase=_lowerCAmelCase, use_effective_order=_lowerCAmelCase, )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
_a = {}
result.update({'''sari''': compute_sari(sources=__UpperCAmelCase , predictions=__UpperCAmelCase , references=__UpperCAmelCase )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=__UpperCAmelCase , references=__UpperCAmelCase )} )
result.update({'''exact''': compute_em(predictions=__UpperCAmelCase , references=__UpperCAmelCase )} )
return result
| 320 | 0 |
'''simple docstring'''
def __lowerCamelCase ( A__ ) -> bool:
"""simple docstring"""
if not isinstance(A__ , A__ ):
UpperCamelCase = F"""Input value of [number={number}] must be an integer"""
raise TypeError(A__ )
if number < 0:
return False
UpperCamelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
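# Example: the loop above compares number and number * number digit by digit from the right,
# i.e. it checks whether the number is automorphic (its square ends in the number itself).
# 5 -> 25, 6 -> 36 and 76 -> 5776 are accepted, while 7 is rejected because 49 does not end in 7.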
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 |
"""simple docstring"""
def A_ ( _lowerCAmelCase : int = 50 ):
"""simple docstring"""
_a = [1] * (length + 1)
for row_length in range(3, length + 1 ):
for block_length in range(3, row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
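# Sanity check: for a row of length 7 the recurrence above yields 17 arrangements, matching
# the worked example in Project Euler problem 114 (red blocks of length >= 3 separated by at
# least one black square); the default argument then counts the length-50 case asked for.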
if __name__ == "__main__":
print(f'{solution() = }')
| 320 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : "DiagonalGaussianDistribution"
class lowerCamelCase (_snake_case , _snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = True
@register_to_config
def __init__( self , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = ("DownEncoderBlock2D",) , _UpperCamelCase = ("UpDecoderBlock2D",) , _UpperCamelCase = (6_4,) , _UpperCamelCase = 1 , _UpperCamelCase = "silu" , _UpperCamelCase = 4 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 0.1_82_15 , ) -> List[Any]:
super().__init__()
# pass init params to Encoder
UpperCAmelCase_ : List[str] = Encoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , )
# pass init params to Decoder
UpperCAmelCase_ : Dict = Decoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , norm_num_groups=_UpperCamelCase , act_fn=_UpperCamelCase , )
        UpperCAmelCase_ : Any = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        UpperCAmelCase_ : List[Any] = nn.Conv2d(_UpperCamelCase , _UpperCamelCase , 1 )
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : int = False
# only relevant if vae tiling is enabled
UpperCAmelCase_ : Optional[int] = self.config.sample_size
UpperCAmelCase_ : int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCAmelCase_ : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase_ : Optional[Any] = 0.25
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[str]:
if isinstance(_UpperCamelCase , (Encoder, Decoder) ):
UpperCAmelCase_ : Union[str, Any] = value
def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> int:
UpperCAmelCase_ : Tuple = use_tiling
def __UpperCAmelCase ( self ) -> Dict:
self.enable_tiling(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = True
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]:
UpperCAmelCase_ : Optional[int] = {}
def fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
UpperCAmelCase_ : Optional[int] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return processors
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
module.set_processor(_UpperCamelCase )
else:
module.set_processor(processor.pop(f"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_UpperCamelCase , return_dict=_UpperCamelCase )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase_ : Union[str, Any] = [self.encoder(_UpperCamelCase ) for x_slice in x.split(1 )]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : List[Any] = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_UpperCamelCase , return_dict=_UpperCamelCase )
UpperCAmelCase_ : str = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.decoder(_UpperCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase_ : List[str] = [self._decode(_UpperCamelCase ).sample for z_slice in z.split(1 )]
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : Any = self._decode(_UpperCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : Tuple = min(a.shape[2] , b.shape[2] , _UpperCamelCase )
for y in range(_UpperCamelCase ):
UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Tuple = min(a.shape[3] , b.shape[3] , _UpperCamelCase )
for x in range(_UpperCamelCase ):
UpperCAmelCase_ : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
UpperCAmelCase_ : Any = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Optional[int] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase_ : List[str] = []
for i in range(0 , x.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : Any = []
for j in range(0 , x.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase_ : Dict = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.quant_conv(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : str = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Dict = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=2 )
UpperCAmelCase_ : List[Any] = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Dict = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase_ : Union[str, Any] = []
for i in range(0 , z.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = []
for j in range(0 , z.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase_ : Optional[Any] = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = self.decoder(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Union[str, Any] = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : Optional[Any] = sample
UpperCAmelCase_ : Union[str, Any] = self.encode(_UpperCamelCase ).latent_dist
if sample_posterior:
UpperCAmelCase_ : str = posterior.sample(generator=_UpperCamelCase )
else:
UpperCAmelCase_ : int = posterior.mode()
UpperCAmelCase_ : Dict = self.decode(_UpperCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
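# --- Usage sketch (assumption: relies on the public `diffusers` AutoencoderKL API and an
# example checkpoint, not on the renamed class above) ---
# The class mirrors diffusers' KL autoencoder; enabling tiling makes encode/decode split
# large inputs into overlapping tiles and blend the seams, as implemented above.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained('''stabilityai/sd-vae-ft-mse''')
vae.enable_tiling()

image = torch.randn(1, 3, 1024, 1024)  # stand-in for a preprocessed RGB image batch
with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample()
    reconstruction = vae.decode(latents).sample
print(reconstruction.shape)  # torch.Size([1, 3, 1024, 1024])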
| 29 |
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__snake_case = logging.get_logger('''transformers.models.speecht5''')
__snake_case = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
__snake_case = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
__snake_case = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
__snake_case = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
__snake_case = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
__snake_case = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
__snake_case = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
__snake_case = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
__snake_case = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__snake_case = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__snake_case = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__snake_case = []
__snake_case = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Tuple, _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
for attribute in key.split('''.''' ):
_a = getattr(_lowerCAmelCase, _lowerCAmelCase )
if weight_type is not None:
_a = getattr(_lowerCAmelCase, _lowerCAmelCase ).shape
else:
_a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
elif weight_type == "running_mean":
_a = value
elif weight_type == "running_var":
_a = value
elif weight_type == "num_batches_tracked":
_a = value
else:
_a = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Tuple ):
"""simple docstring"""
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_a , _a = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def A_ ( _lowerCAmelCase : Any, _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : int ):
"""simple docstring"""
_a = []
if task == "s2t":
_a = hf_model.speechta.encoder.prenet.feature_encoder
_a = MAPPING_S2T
_a = IGNORE_KEYS_S2T
elif task == "t2s":
_a = None
_a = MAPPING_T2S
_a = IGNORE_KEYS_T2S
elif task == "s2s":
_a = hf_model.speechta.encoder.prenet.feature_encoder
_a = MAPPING_S2S
_a = IGNORE_KEYS_S2S
else:
raise ValueError(f'Unsupported task: {task}' )
for name, value in fairseq_dict.items():
if should_ignore(_lowerCAmelCase, _lowerCAmelCase ):
logger.info(f'{name} was ignored' )
continue
_a = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, hf_model.config.feat_extract_norm == '''group''', )
_a = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
_a , _a = key.split('''.*.''' )
if prefix in name and suffix in name:
_a = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
_a = True
if "*" in mapped_key:
_a = name.split(_lowerCAmelCase )[0].split('''.''' )[-2]
_a = mapped_key.replace('''*''', _lowerCAmelCase )
if "weight_g" in name:
_a = '''weight_g'''
elif "weight_v" in name:
_a = '''weight_v'''
elif "bias" in name:
_a = '''bias'''
elif "weight" in name:
_a = '''weight'''
elif "running_mean" in name:
_a = '''running_mean'''
elif "running_var" in name:
_a = '''running_var'''
elif "num_batches_tracked" in name:
_a = '''num_batches_tracked'''
else:
_a = None
set_recursively(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Dict, _lowerCAmelCase : List[Any], _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_a = full_name.split('''conv_layers.''' )[-1]
_a = name.split('''.''' )
_a = int(items[0] )
_a = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
_a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
_a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
_a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
_a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def A_ ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Dict, _lowerCAmelCase : List[Any]=None, _lowerCAmelCase : List[str]=None, _lowerCAmelCase : int=None, ):
"""simple docstring"""
if config_path is not None:
_a = SpeechTaConfig.from_pretrained(_lowerCAmelCase )
else:
_a = SpeechTaConfig()
if task == "s2t":
_a = config.max_text_positions
_a = SpeechTaForSpeechToText(_lowerCAmelCase )
elif task == "t2s":
_a = 18_76
_a = 6_00
_a = config.max_speech_positions
_a = SpeechTaForTextToSpeech(_lowerCAmelCase )
elif task == "s2s":
_a = 18_76
_a = config.max_speech_positions
_a = SpeechTaForSpeechToSpeech(_lowerCAmelCase )
else:
raise ValueError(f'Unknown task name: {task}' )
if vocab_path:
_a = SpeechTaTokenizer(_lowerCAmelCase, model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
_a = AddedToken('''<mask>''', lstrip=_lowerCAmelCase, rstrip=_lowerCAmelCase )
_a = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
_a = SpeechTaFeatureExtractor()
_a = SpeechTaProcessor(tokenizer=_lowerCAmelCase, feature_extractor=_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
_a = torch.load(_lowerCAmelCase )
recursively_load_weights(fairseq_checkpoint['''model'''], _lowerCAmelCase, _lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(_lowerCAmelCase )
model.push_to_hub(_lowerCAmelCase )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
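# --- Example invocation (assumption: the script name and all paths are illustrative) ---
# The script converts a fairseq SpeechT5 checkpoint into the Hugging Face format; a
# text-to-speech conversion could be launched roughly like this:
#
#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf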
| 320 | 0 |
def a ( snake_case__: int ):
    '''simple docstring'''
    upper_limit = snake_case__
    if upper_limit < 0:
        raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
            N = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(f"The Catalan numbers from 0 through {N} are:")
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
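# --- Cross-check sketch (assumption: the closed-form helper below is not part of the
# original file) ---
# Catalan numbers also satisfy C(n) = binom(2n, n) / (n + 1), which gives an
# independent check of the dynamic-programming table built above.
from math import comb


def _catalan_closed_form(n: int) -> int:
    return comb(2 * n, n) // (n + 1)


print([_catalan_closed_form(i) for i in range(10)])
# [1, 1, 2, 5, 14, 42, 132, 429, 1430, 4862]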
| 30 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[Any] = 'decision_transformer'
A_ : Union[str, Any] = ['past_key_values']
A_ : str = {
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , __UpperCAmelCase=17 , __UpperCAmelCase=4 , __UpperCAmelCase=128 , __UpperCAmelCase=4096 , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=1024 , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=None , __UpperCAmelCase="relu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Optional[int]:
_a = state_dim
_a = act_dim
_a = hidden_size
_a = max_ep_len
_a = action_tanh
_a = vocab_size
_a = n_positions
_a = n_layer
_a = n_head
_a = n_inner
_a = activation_function
_a = resid_pdrop
_a = embd_pdrop
_a = attn_pdrop
_a = layer_norm_epsilon
_a = initializer_range
_a = scale_attn_weights
_a = use_cache
_a = scale_attn_by_inverse_layer_idx
_a = reorder_and_upcast_attn
_a = bos_token_id
_a = eos_token_id
super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
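# --- Usage sketch (assumption: uses the public `transformers` classes rather than the
# renamed config above) ---
# The configuration wires a small GPT-2 style backbone to an environment's state and
# action dimensions; a model for a 17-dim state and 6-dim action space could be built as:
from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=17, act_dim=6, hidden_size=128, n_layer=3, n_head=1)
model = DecisionTransformerModel(config)
print(model.config.max_ep_len)  # 4096 by default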
| 320 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__SCREAMING_SNAKE_CASE : Optional[int] = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = ["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
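# --- Usage note (assumption: illustrative import; relies only on the public
# `transformers` package) ---
# Because of the lazy module above, the heavy torch-based modeling code is imported
# only when one of the exported names is actually accessed:
from transformers import GPTNeoXConfig  # cheap: only the configuration module loads

print(GPTNeoXConfig().num_hidden_layers)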
| 31 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__snake_case = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[str] = ['pixel_values']
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> None:
super().__init__(**__UpperCAmelCase )
_a = size if size is not None else {'''shortest_edge''': 224}
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
_a = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' )
_a = do_resize
_a = size
_a = resample
_a = do_center_crop
_a = crop_size
_a = do_rescale
_a = rescale_factor
_a = do_normalize
_a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_a = image_std if image_std is not None else OPENAI_CLIP_STD
_a = do_convert_rgb
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_a = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase )
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
_a = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Optional[Any]:
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image:
_a = do_resize if do_resize is not None else self.do_resize
_a = size if size is not None else self.size
_a = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase )
_a = resample if resample is not None else self.resample
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase )
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_a = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_a = [convert_to_rgb(__UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
_a = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
_a = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
if do_rescale:
_a = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_normalize:
_a = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images]
_a = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
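# --- Usage sketch (assumption: uses the public `transformers` CLIPImageProcessor with
# an illustrative random image) ---
# With the defaults set in __init__ above, the processor resizes the shortest edge to
# 224, center-crops to 224x224, rescales to [0, 1] and normalizes with the OpenAI CLIP
# mean/std before batching.
import numpy as np
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()
fake_image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
batch = processor(images=fake_image, return_tensors='''np''')
print(batch['''pixel_values'''].shape)  # (1, 3, 224, 224)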
| 320 | 0 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
UpperCAmelCase_ : List[Any] = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def SCREAMING_SNAKE_CASE_ ( __A : List[str] ) -> Any:
"""simple docstring"""
a_ : Optional[int] = {}
state_dict.pop('pixel_mean' , __A )
state_dict.pop('pixel_std' , __A )
a_ : Dict = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
a_ : Tuple = key.replace(__A , __A )
if re.match(__A , __A ):
a_ : int = int(re.match(__A , __A ).group(2 ) )
if layer_nb == 0:
a_ : Dict = key.replace('layers.0' , 'proj_in' )
elif layer_nb == 1:
a_ : List[str] = key.replace('layers.1' , 'layers.0' )
elif layer_nb == 2:
a_ : int = key.replace('layers.2' , 'proj_out' )
a_ : List[Any] = value
a_ : Optional[int] = model_state_dict[
'prompt_encoder.shared_embedding.positional_embedding'
]
return model_state_dict
def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : int , __A : List[Any] , __A : Tuple="ybelkada/segment-anything" ) -> str:
"""simple docstring"""
a_ : Tuple = hf_hub_download(__A , F"""checkpoints/{model_name}.pth""" )
if "sam_vit_b" in model_name:
a_ : Optional[int] = SamConfig()
elif "sam_vit_l" in model_name:
a_ : Union[str, Any] = SamVisionConfig(
hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
a_ : Optional[int] = SamConfig(
vision_config=__A , )
elif "sam_vit_h" in model_name:
a_ : List[str] = SamVisionConfig(
hidden_size=12_80 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
a_ : int = SamConfig(
vision_config=__A , )
a_ : str = torch.load(__A , map_location='cpu' )
a_ : Tuple = replace_keys(__A )
a_ : Optional[Any] = SamImageProcessor()
a_ : Any = SamProcessor(image_processor=__A )
a_ : List[Any] = SamModel(__A )
hf_model.load_state_dict(__A )
a_ : Dict = hf_model.to('cuda' )
a_ : Optional[Any] = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
a_ : List[Any] = Image.open(requests.get(__A , stream=__A ).raw ).convert('RGB' )
a_ : Dict = [[[4_00, 6_50]]]
a_ : str = [[1]]
a_ : Optional[Any] = processor(images=np.array(__A ) , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
a_ : str = hf_model(**__A )
a_ : List[str] = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579890251159668
a_ : List[str] = processor(
images=np.array(__A ) , input_points=__A , input_labels=__A , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
a_ : Optional[Any] = hf_model(**__A )
a_ : str = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712603092193604
a_ : Any = ((75, 2_75, 17_25, 8_50),)
a_ : Optional[int] = processor(images=np.array(__A ) , input_boxes=__A , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
a_ : str = hf_model(**__A )
a_ : Dict = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686015605926514
# Test with 2 points and 1 image.
a_ : Union[str, Any] = [[[4_00, 6_50], [8_00, 6_50]]]
a_ : Optional[int] = [[1, 1]]
a_ : Any = processor(
images=np.array(__A ) , input_points=__A , input_labels=__A , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
a_ : Optional[int] = hf_model(**__A )
a_ : List[str] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
UpperCAmelCase_ : Optional[int] = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 32 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 320 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A : List[Any] = logging.get_logger(__name__)
__A : List[str] = '''▁'''
__A : Any = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__A : Union[str, Any] = {
'''vocab_file''': {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
),
}
}
__A : Optional[Any] = {
'''xlm-roberta-base''': 512,
'''xlm-roberta-large''': 512,
'''xlm-roberta-large-finetuned-conll02-dutch''': 512,
'''xlm-roberta-large-finetuned-conll02-spanish''': 512,
'''xlm-roberta-large-finetuned-conll03-english''': 512,
'''xlm-roberta-large-finetuned-conll03-german''': 512,
}
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Tuple = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] , A : List[Any] , A : List[str]="<s>" , A : Optional[int]="</s>" , A : Optional[int]="</s>" , A : Tuple="<s>" , A : List[str]="<unk>" , A : Tuple="<pad>" , A : Tuple="<mask>" , A : Optional[Dict[str, Any]] = None , **A : List[Any] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
lowercase_ : Any = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
lowercase_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
lowercase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
lowercase_ : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase_ : List[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase_ : List[Any] = 1
lowercase_ : Tuple = len(self.sp_model ) + self.fairseq_offset
lowercase_ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Dict ) -> Dict:
lowercase_ : Tuple = self.__dict__.copy()
lowercase_ : str = None
lowercase_ : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple , A : Tuple ) -> int:
lowercase_ : Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase_ : Dict = {}
lowercase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def A ( self : Tuple , A : List[int] , A : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase_ : Optional[int] = [self.cls_token_id]
lowercase_ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A ( self : str , A : List[int] , A : Optional[List[int]] = None , A : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def A ( self : int , A : List[int] , A : Optional[List[int]] = None ) -> List[int]:
lowercase_ : str = [self.sep_token_id]
lowercase_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def A ( self : Optional[int] ) -> Optional[int]:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def A ( self : List[str] ) -> Optional[Any]:
lowercase_ : List[str] = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A ( self : Dict , A : str ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def A ( self : Any , A : int ) -> List[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase_ : Dict = self.sp_model.PieceToId(A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A ( self : Dict , A : str ) -> Union[str, Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def A ( self : List[str] , A : Tuple ) -> List[Any]:
lowercase_ : Tuple = ''''''.join(A ).replace(A , ''' ''' ).strip()
return out_string
def A ( self : Any , A : str , A : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : int = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , '''wb''' ) as fi:
lowercase_ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
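# --- Usage sketch (assumption: uses the public `transformers` XLMRobertaTokenizer and
# downloads the `xlm-roberta-base` files) ---
# The +1 fairseq offset above keeps token ids aligned with the original fairseq vocab,
# with <s>/<pad>/</s>/<unk> pinned to ids 0-3:
from transformers import XLMRobertaTokenizer

tok = XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''')
ids = tok('''Hello world!''')['''input_ids''']
print(ids[0], ids[-1])  # 0 (<s>) and 2 (</s>)
print(tok.decode(ids, skip_special_tokens=True))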
| 33 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
__snake_case = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
__snake_case = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def A_ ( ):
"""simple docstring"""
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2''', '''rougeL'''] )
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2'''] )
assert (
pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean()
)
def A_ ( ):
"""simple docstring"""
_a = '''rougeLsum'''
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k]
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k]
assert score > score_no_sep
def A_ ( ):
"""simple docstring"""
_a = ['''rouge1''', '''rouge2''', '''rougeL''']
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase )
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase )
assert score_sep == score_no_sep
def A_ ( ):
"""simple docstring"""
_a = [
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
]
_a = [
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
assert calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase ) == calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase )
def A_ ( ):
"""simple docstring"""
_a = [
'''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
]
_a = [
''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''], newline_sep=_lowerCAmelCase )['''rougeLsum''']
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''] )['''rougeLsum''']
assert new_score > prev_score
def A_ ( ):
"""simple docstring"""
_a = Path('''examples/seq2seq/test_data/wmt_en_ro''' )
_a = calculate_rouge_path(data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ) )
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
_a = calculate_rouge_path(
data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ), bootstrap_aggregation=_lowerCAmelCase )
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
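# --- Minimal ROUGE sketch (assumption: uses the `rouge_score` package directly instead
# of the project's `calculate_rouge` helper) ---
# The tests above exercise aggregation, newline handling and rougeLsum; the underlying
# scoring call looks like this:
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(['''rouge1''', '''rouge2''', '''rougeLsum'''], use_stemmer=True)
scores = scorer.score('''The cat sat on the mat .''', '''A cat was sitting on the mat .''')
print(scores['''rouge1'''].fmeasure)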
| 320 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
A =['bert-base-uncased', 'bert-base-cased']
A ='hf-internal-testing/tiny-bert-tf-only'
if is_tf_available():
class _a ( tf.keras.Model ):
def __init__( self : Optional[int] , lowercase : List[Any] ):
'''simple docstring'''
super().__init__()
UpperCAmelCase = tokenizer
UpperCAmelCase = AutoConfig.from_pretrained(lowercase )
UpperCAmelCase = TFAutoModel.from_config(lowercase )
def A ( self : List[str] , lowercase : Dict ):
'''simple docstring'''
UpperCAmelCase = self.tokenizer(lowercase )
UpperCAmelCase = self.bert(**lowercase )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class _a ( unittest.TestCase ):
def A ( self : Tuple ):
'''simple docstring'''
super().setUp()
UpperCAmelCase = [
BertTokenizer.from_pretrained(lowercase ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
UpperCAmelCase = [TFBertTokenizer.from_pretrained(lowercase ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(lowercase , use_fast_bert_tokenizer=lowercase )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCAmelCase = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
UpperCAmelCase = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def A ( self : Optional[int] ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCAmelCase = tokenizer(lowercase , return_tensors='''tf''' , padding='''longest''' )
UpperCAmelCase = tf_tokenizer(lowercase )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def A ( self : Union[str, Any] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase = tf_tokenizer(self.paired_sentences )
UpperCAmelCase = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
    def test_graph_mode( self : List[str] ):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in (self.test_sentences, self.paired_sentences):
                model_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(model_inputs )
                eager_outputs = tf_tokenizer(model_inputs )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
    def test_saved_model( self : List[Any] ):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor(self.test_sentences )
            out = model(test_inputs )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / '''saved.model'''
                model.save(save_path )
                loaded_model = tf.keras.models.load_model(save_path )
                loaded_output = loaded_model(test_inputs )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
| 34 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
__snake_case = logging.get_logger(__name__)
class __lowerCamelCase ( a__ ):
'''simple docstring'''
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ChineseCLIPImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 320 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["keras_nlp"]
    def __init__( self : List[Any] , *args : Dict , **kwargs : int ):
        requires_backends(self , ["""keras_nlp"""] )
| 35 |
"""simple docstring"""
from __future__ import annotations
def A_ ( stress : float, tangential_force : float, area : float, ):
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
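    # Hedged usage sketch (an assumption, not part of the original file): with a
    # tangential force of 100 N over an area of 20 m^2 and the stress passed as 0
    # (the unknown), the helper should solve for the shear stress 100 / 20 = 5.0.
    print(A_(0, 100, 20))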
| 320 | 0 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=24, __a=2, __a=6, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=None, __a=1000, ):
'''simple docstring'''
_lowerCAmelCase : Tuple = parent
_lowerCAmelCase : List[str] = batch_size
_lowerCAmelCase : int = seq_length
_lowerCAmelCase : Optional[int] = is_training
_lowerCAmelCase : Dict = use_input_mask
_lowerCAmelCase : List[str] = use_token_type_ids
_lowerCAmelCase : str = use_labels
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Optional[Any] = num_attention_heads
_lowerCAmelCase : Any = intermediate_size
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : int = max_position_embeddings
_lowerCAmelCase : Optional[int] = type_vocab_size
_lowerCAmelCase : Optional[Any] = type_sequence_label_size
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : List[Any] = num_labels
_lowerCAmelCase : Tuple = scope
_lowerCAmelCase : str = range_bbox
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowerCAmelCase : Dict = bbox[i, j, 3]
_lowerCAmelCase : int = bbox[i, j, 1]
_lowerCAmelCase : Tuple = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowerCAmelCase : str = bbox[i, j, 2]
_lowerCAmelCase : List[Any] = bbox[i, j, 0]
_lowerCAmelCase : str = t
_lowerCAmelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
_lowerCAmelCase : Dict = None
if self.use_token_type_ids:
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowerCAmelCase : Optional[int] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case__ ( self):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = LiltModel(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Dict = model(__a, bbox=__a, attention_mask=__a, token_type_ids=__a)
_lowerCAmelCase : str = model(__a, bbox=__a, token_type_ids=__a)
_lowerCAmelCase : List[Any] = model(__a, bbox=__a)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : Optional[Any] = LiltForTokenClassification(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Dict = model(
__a, bbox=__a, attention_mask=__a, token_type_ids=__a, labels=__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = LiltForQuestionAnswering(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Tuple = model(
__a, bbox=__a, attention_mask=__a, token_type_ids=__a, start_positions=__a, end_positions=__a, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def snake_case__ ( self):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( a , a , a , unittest.TestCase):
lowerCamelCase__ = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self, __a, __a, __a, __a, __a):
'''simple docstring'''
return True
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = LiltModelTester(self)
_lowerCAmelCase : Union[str, Any] = ConfigTester(self, config_class=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase : Any = type
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : str = LiltModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_torch
@slow
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]], device=torch_device, )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1E-3))
| 36 |
"""simple docstring"""
def solution():
    """simple docstring"""
    constant = []
    i = 1
    while len(constant ) < 1e6:
        constant.append(str(i ) )
        i += 1
    constant = ''''''.join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[9_99] )
* int(constant[99_99] )
* int(constant[9_99_99] )
* int(constant[99_99_99] )
)
if __name__ == "__main__":
print(solution())
| 320 | 0 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
_lowerCAmelCase = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
_lowerCAmelCase = {
'''vinai/phobert-base''': 256,
'''vinai/phobert-large''': 256,
}
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : str = set()
lowerCAmelCase__ : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase__ : Dict = char
lowerCAmelCase__ : Any = set(UpperCamelCase )
return pairs
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = VOCAB_FILES_NAMES
__lowercase : int = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase="<s>" ,__UpperCAmelCase="</s>" ,__UpperCAmelCase="</s>" ,__UpperCAmelCase="<s>" ,__UpperCAmelCase="<unk>" ,__UpperCAmelCase="<pad>" ,__UpperCAmelCase="<mask>" ,**__UpperCAmelCase ,) -> List[str]:
super().__init__(
bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,sep_token=__UpperCAmelCase ,cls_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,mask_token=__UpperCAmelCase ,**__UpperCAmelCase ,)
lowerCAmelCase__ : List[Any] = vocab_file
lowerCAmelCase__ : Dict = merges_file
lowerCAmelCase__ : int = {}
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : Optional[Any] = 1
lowerCAmelCase__ : Union[str, Any] = 2
lowerCAmelCase__ : Optional[int] = 3
self.add_from_file(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = {v: k for k, v in self.encoder.items()}
with open(__UpperCAmelCase ,encoding="""utf-8""" ) as merges_handle:
lowerCAmelCase__ : int = merges_handle.read().split("""\n""" )[:-1]
lowerCAmelCase__ : Optional[int] = [tuple(merge.split()[:-1] ) for merge in merges]
lowerCAmelCase__ : Dict = dict(zip(__UpperCAmelCase ,range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ : Dict = {}
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase__ : Optional[int] = [self.cls_token_id]
lowerCAmelCase__ : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase ,token_ids_a=__UpperCAmelCase ,already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]:
lowerCAmelCase__ : Optional[Any] = [self.sep_token_id]
lowerCAmelCase__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase_ ( self ) -> Tuple:
return len(self.encoder )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return dict(self.encoder ,**self.added_tokens_encoder )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[str]:
if token in self.cache:
return self.cache[token]
lowerCAmelCase__ : List[Any] = tuple(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
lowerCAmelCase__ : Optional[int] = get_pairs(__UpperCAmelCase )
if not pairs:
return token
while True:
lowerCAmelCase__ : Tuple = min(__UpperCAmelCase ,key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = bigram
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : List[str] = 0
while i < len(__UpperCAmelCase ):
try:
lowerCAmelCase__ : Any = word.index(__UpperCAmelCase ,__UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase__ : Dict = j
if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase__ : str = tuple(__UpperCAmelCase )
lowerCAmelCase__ : int = new_word
if len(__UpperCAmelCase ) == 1:
break
else:
lowerCAmelCase__ : Any = get_pairs(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = """@@ """.join(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = word[:-4]
lowerCAmelCase__ : Optional[Any] = word
return word
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[int]:
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : str = re.findall(R"""\S+\n?""" ,__UpperCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(__UpperCAmelCase ).split(""" """ ) ) )
return split_tokens
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Union[str, Any]:
return self.encoder.get(__UpperCAmelCase ,self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple:
return self.decoder.get(__UpperCAmelCase ,self.unk_token )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[str]:
lowerCAmelCase__ : List[str] = """ """.join(__UpperCAmelCase ).replace("""@@ """ ,"""""" ).strip()
return out_string
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ : List[str] = os.path.join(
__UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase__ : List[str] = os.path.join(
__UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file ,__UpperCAmelCase )
if os.path.abspath(self.merges_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.merges_file ,__UpperCAmelCase )
return out_vocab_file, out_merge_file
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple:
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
try:
with open(__UpperCAmelCase ,"""r""" ,encoding="""utf-8""" ) as fd:
self.add_from_file(__UpperCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F"""Incorrect encoding detected in {f}, please rebuild the dataset""" )
return
lowerCAmelCase__ : Tuple = f.readlines()
for lineTmp in lines:
lowerCAmelCase__ : Optional[int] = lineTmp.strip()
lowerCAmelCase__ : List[str] = line.rfind(""" """ )
if idx == -1:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
lowerCAmelCase__ : Optional[Any] = line[:idx]
lowerCAmelCase__ : int = len(self.encoder )
| 37 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[str] = 'bart'
A_ : Optional[Any] = ['past_key_values']
A_ : Union[str, Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , __UpperCAmelCase=50265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ) -> Tuple:
_a = vocab_size
_a = max_position_embeddings
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = encoder_layerdrop
_a = decoder_layerdrop
_a = classifier_dropout
_a = use_cache
_a = encoder_layers
_a = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ):
_a = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'''The config can simply be saved and uploaded again to be fixed.''' )
class __lowerCamelCase ( a__ ):
'''simple docstring'''
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_a = {0: '''batch'''}
_a = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
_a = {0: '''batch''', 1: '''decoder_sequence'''}
_a = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_a , _a = self.num_layers
for i in range(__UpperCAmelCase ):
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
_a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_a = super().outputs
else:
_a = super(__UpperCAmelCase , self ).outputs
if self.use_past:
_a , _a = self.num_layers
for i in range(__UpperCAmelCase ):
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Generate decoder inputs
_a = seq_length if not self.use_past else 1
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_a = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
_a = dict(**__UpperCAmelCase , **__UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_a , _a = common_inputs['''input_ids'''].shape
_a = common_inputs['''decoder_input_ids'''].shape[1]
_a , _a = self.num_attention_heads
_a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a = decoder_seq_length + 3
_a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_a = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 )
_a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_a , _a = self.num_layers
_a = min(__UpperCAmelCase , __UpperCAmelCase )
_a = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers
_a = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(__UpperCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
) )
# TODO: test this.
_a = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(__UpperCAmelCase , __UpperCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) )
return common_inputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_a , _a = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_a = seqlen + 2
_a , _a = self.num_layers
_a , _a = self.num_attention_heads
_a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a = common_inputs['''attention_mask'''].dtype
_a = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
_a = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase )
]
return common_inputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_a = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_a = tokenizer.num_special_tokens_to_add(__UpperCAmelCase )
_a = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_a = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
_a = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) )
return common_inputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
elif self.task == "causal-lm":
_a = self._generate_dummy_inputs_for_causal_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
else:
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
return common_inputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
if self.task in ["default", "seq2seq-lm"]:
_a = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
_a = super(__UpperCAmelCase , self )._flatten_past_key_values_(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
| 320 | 0 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes ) -> bytes:
    """simple docstring"""
    if len(string_aa ) != 32:
        raise ValueError("""Input must be of length 32""" )
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int ) -> bytes:
    """simple docstring"""
    if i < 0:
        raise ValueError("""Input must be non-negative""" )
    hex_rep = format(i , """08x""" )[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""" )
    return little_endian_hex
def preprocess(message: bytes ) -> bytes:
    """simple docstring"""
    bit_string = b""
    for char in message:
        bit_string += format(char , """08b""" ).encode("""utf-8""" )
    start_len = format(len(bit_string ) , """064b""" ).encode("""utf-8""" )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
    return bit_string
def get_block_words(bit_string: bytes ) -> Generator[list[int], None, None]:
    """simple docstring"""
    if len(bit_string ) % 512 != 0:
        raise ValueError("""Input must have length that's a multiple of 512""" )
    for pos in range(0 , len(bit_string ) , 512 ):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0 , 512 , 32 ):
            block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
        yield block_words
def not_aa(i: int ) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError("""Input must be non-negative""" )
    i_str = format(i , """032b""" )
    new_str = """"""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def sum_aa(a: int , b: int ) -> int:
    """simple docstring"""
    return (a + b) % 2**32
def left_rotate_aa(i: int , shift: int ) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError("""Input must be non-negative""" )
    if shift < 0:
        raise ValueError("""Shift must be non-negative""" )
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes ) -> bytes:
    """simple docstring"""
    bit_string = preprocess(message )
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
    # Starting states
    aa = 0X67_45_23_01
    ba = 0XEF_CD_AB_89
    ca = 0X98_BA_DC_FE
    da = 0X10_32_54_76
    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
        # Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
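    # Hedged usage sketch (an assumption, not part of the original file): compare
    # the pure-Python digest above with hashlib's reference MD5 for a short message.
    import hashlib
    print(md5_me(b"Hello, world!"))
    print(hashlib.md5(b"Hello, world!").hexdigest().encode("utf-8"))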
| 38 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
for char in word:
_a = ord(_lowerCAmelCase )
if not _is_chinese_char(_lowerCAmelCase ):
return 0
return 1
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_a = set()
for token in tokens:
_a = len(_lowerCAmelCase ) > 1 and is_chinese(_lowerCAmelCase )
if chinese_word:
word_set.add(_lowerCAmelCase )
_a = list(_lowerCAmelCase )
return word_list
def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : set() ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
_a = max([len(_lowerCAmelCase ) for w in chinese_word_set] )
_a = bert_tokens
_a , _a = 0, len(_lowerCAmelCase )
while start < end:
_a = True
if is_chinese(bert_word[start] ):
_a = min(end - start, _lowerCAmelCase )
for i in range(_lowerCAmelCase, 1, -1 ):
_a = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
_a = '''##''' + bert_word[j]
_a = start + i
_a = False
break
if single_word:
start += 1
return bert_word
def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : LTP, _lowerCAmelCase : BertTokenizer ):
"""simple docstring"""
_a = []
for i in range(0, len(_lowerCAmelCase ), 1_00 ):
_a = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=['''cws'''] ).cws
_a = [get_chinese_word(_lowerCAmelCase ) for r in res]
ltp_res.extend(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
_a = []
for i in range(0, len(_lowerCAmelCase ), 1_00 ):
_a = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=_lowerCAmelCase, truncation=_lowerCAmelCase, max_length=5_12 )
bert_res.extend(res['''input_ids'''] )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
_a = []
for input_ids, chinese_word in zip(_lowerCAmelCase, _lowerCAmelCase ):
_a = []
for id in input_ids:
_a = bert_tokenizer._convert_id_to_token(_lowerCAmelCase )
input_tokens.append(_lowerCAmelCase )
_a = add_sub_symbol(_lowerCAmelCase, _lowerCAmelCase )
_a = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_lowerCAmelCase ):
if token[:2] == "##":
_a = token[2:]
# save chinese tokens' pos
if len(_lowerCAmelCase ) == 1 and _is_chinese_char(ord(_lowerCAmelCase ) ):
ref_id.append(_lowerCAmelCase )
ref_ids.append(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
return ref_ids
def A_ ( _lowerCAmelCase : Any ):
"""simple docstring"""
with open(args.file_name, '''r''', encoding='''utf-8''' ) as f:
_a = f.readlines()
_a = [line.strip() for line in data if len(_lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_a = LTP(args.ltp ) # faster in GPU device
_a = BertTokenizer.from_pretrained(args.bert )
_a = prepare_ref(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
with open(args.save_path, '''w''', encoding='''utf-8''' ) as f:
_a = [json.dumps(_lowerCAmelCase ) + '''\n''' for ref in ref_ids]
f.writelines(_lowerCAmelCase )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
__snake_case = parser.parse_args()
main(args)
| 320 | 0 |
def sum_of_digits(__lowerCAmelCase: int) -> int:
    """simple docstring"""
    n = abs(__lowerCAmelCase)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(__lowerCAmelCase: int) -> int:
    """simple docstring"""
    n = abs(__lowerCAmelCase)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(__lowerCAmelCase: int) -> int:
    """simple docstring"""
    return sum(int(c) for c in str(abs(__lowerCAmelCase)))


def benchmark() -> None:
    """simple docstring"""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = F"""{func.__name__}({value})"""
        timing = timeit(F"""__main__.{call}""", setup='import __main__')
        print(F"""{call:56} = {func(value)} -- {timing:.4f} seconds""")

    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
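    # Hedged usage sketch (an assumption, not part of the original file): all three
    # variants should agree, e.g. the digits of 12345 sum to 15.
    print(sum_of_digits(12_345), sum_of_digits_recursion(12_345), sum_of_digits_compact(12_345))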
| 39 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[Any] = 'gptj'
A_ : Optional[int] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , __UpperCAmelCase=50400 , __UpperCAmelCase=2048 , __UpperCAmelCase=4096 , __UpperCAmelCase=28 , __UpperCAmelCase=16 , __UpperCAmelCase=64 , __UpperCAmelCase=None , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Union[str, Any]:
_a = vocab_size
_a = n_positions
_a = n_embd
_a = n_layer
_a = n_head
_a = n_inner
_a = rotary_dim
_a = activation_function
_a = resid_pdrop
_a = embd_pdrop
_a = attn_pdrop
_a = layer_norm_epsilon
_a = initializer_range
_a = use_cache
_a = bos_token_id
_a = eos_token_id
super().__init__(
bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase )
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = "default" , __UpperCAmelCase = None , __UpperCAmelCase = False , ) -> Optional[Any]:
super().__init__(__UpperCAmelCase , task=__UpperCAmelCase , patching_specs=__UpperCAmelCase , use_past=__UpperCAmelCase )
if not getattr(self._config , '''pad_token_id''' , __UpperCAmelCase ):
# TODO: how to do that better?
_a = 0
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
_a = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' )
_a = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
_a = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def _UpperCAmelCase ( self ) -> int:
return self._config.n_layer
@property
def _UpperCAmelCase ( self ) -> int:
return self._config.n_head
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
_a = super(__UpperCAmelCase , self ).generate_dummy_inputs(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
# We need to order the input in the way they appears in the forward()
_a = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_a , _a = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_a = seqlen + 2
_a = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_a = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(self.num_layers )
]
_a = common_inputs['''attention_mask''']
if self.use_past:
_a = ordered_inputs['''attention_mask'''].dtype
_a = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def _UpperCAmelCase ( self ) -> int:
return 13
| 320 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( _a ,unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp( self : Optional[int]):
        super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer( self : Optional[int] , **__UpperCAmelCase : Tuple):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase)
    def get_input_output_texts( self : Union[str, Any] , __UpperCAmelCase : str):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer( self : Any):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens , ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [7, 4, 5, 10, 8, 9])
def __snake_case ( self : Dict):
pass
| 40 |
"""simple docstring"""
import os
import sys
import unittest
__snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__snake_case = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
__snake_case = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> str:
_a = get_test_to_tester_mapping(__UpperCAmelCase )
_a = get_test_to_tester_mapping(__UpperCAmelCase )
_a = {'''BertModelTest''': '''BertModelTester'''}
_a = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = get_model_to_test_mapping(__UpperCAmelCase )
_a = get_model_to_test_mapping(__UpperCAmelCase )
_a = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
_a = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = get_model_to_tester_mapping(__UpperCAmelCase )
_a = get_model_to_tester_mapping(__UpperCAmelCase )
_a = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
_a = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
| 320 | 0 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = '''facebook/wmt19-en-de'''
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'num of params {tiny_model.num_parameters()}')
# Test
batch = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
mname_tiny = '''tiny-wmt19-en-de'''
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-de
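# Hedged usage sketch (an assumption, not part of the original script): reload the
# tiny checkpoint from disk to confirm the save round-trips.
reloaded = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
print(F'reloaded num of params {reloaded.num_parameters()}')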
| 41 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __lowerCamelCase :
'''simple docstring'''
@staticmethod
def _UpperCAmelCase ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
pass
def hashimage(image: Image ):
    """simple docstring"""
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable(mask: Image ):
    """simple docstring"""
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
A_ : Any = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
A_ : str = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
_a = MaskGenerationPipeline(model=__UpperCAmelCase , image_processor=__UpperCAmelCase )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int:
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def _UpperCAmelCase ( self ) -> List[str]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> int:
_a = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
_a = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 )
# Shortening by hashing
_a = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8871}
] , )
# fmt: on
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Any:
_a = '''facebook/sam-vit-huge'''
_a = pipeline('''mask-generation''' , model=__UpperCAmelCase )
_a = image_segmenter(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
_a = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053},
] , )
| 320 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
lowercase : Optional[Any] = 8
def SCREAMING_SNAKE_CASE__ ( __A , __A=BITS ) -> Tuple:
_snake_case = x.device
_snake_case = (x * 255).int().clamp(0 , 255 )
_snake_case = 2 ** torch.arange(bits - 1 , -1 , -1 , device=__A )
_snake_case = rearrange(__A , 'd -> d 1 1' )
_snake_case = rearrange(__A , 'b c h w -> b c 1 h w' )
_snake_case = ((x & mask) != 0).float()
_snake_case = rearrange(__A , 'b c d h w -> b (c d) h w' )
_snake_case = bits * 2 - 1
return bits
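# Inverse of the bit-encoding helper above: thresholds the bit-planes, recombines them
# into 0..255 integers, and rescales the result to a float image in [0, 1].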
def SCREAMING_SNAKE_CASE__ ( __A , __A=BITS ) -> Optional[int]:
_snake_case = x.device
_snake_case = (x > 0).int()
_snake_case = 2 ** torch.arange(bits - 1 , -1 , -1 , device=__A , dtype=torch.intaa )
_snake_case = rearrange(__A , 'd -> d 1 1' )
_snake_case = rearrange(__A , 'b (c d) h w -> b c d h w' , d=8 )
_snake_case = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
return (dec / 255).clamp(0.0 , 1.0 )
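# DDIM step adapted for bit diffusion: essentially DDIMScheduler.step, with the single
# change that the predicted x_0 is clamped to [-bit_scale, bit_scale] instead of [-1, 1].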
def SCREAMING_SNAKE_CASE__ ( self , __A , __A , __A , __A = 0.0 , __A = True , __A=None , __A = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper in detail for a full understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
_snake_case = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
_snake_case = self.alphas_cumprod[timestep]
_snake_case = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
_snake_case = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
_snake_case = self.bit_scale
if self.config.clip_sample:
_snake_case = torch.clamp(__A , -scale , __A )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
_snake_case = self._get_variance(__A , __A )
_snake_case = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
_snake_case = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_snake_case = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_snake_case = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
_snake_case = model_output.device if torch.is_tensor(__A ) else 'cpu'
_snake_case = torch.randn(model_output.shape , dtype=model_output.dtype , generator=__A ).to(__A )
_snake_case = self._get_variance(__A , __A ) ** 0.5 * eta * noise
_snake_case = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=__A , pred_original_sample=__A )
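# DDPM step adapted in the same way: the usual DDPMScheduler.step computation, but with
# the predicted x_0 clamped to [-bit_scale, bit_scale].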
def SCREAMING_SNAKE_CASE__ ( self , __A , __A , __A , __A="epsilon" , __A=None , __A = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
_snake_case = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
_snake_case , _snake_case = torch.split(__A , sample.shape[1] , dim=1 )
else:
_snake_case = None
# 1. compute alphas, betas
_snake_case = self.alphas_cumprod[t]
_snake_case = self.alphas_cumprod[t - 1] if t > 0 else self.one
_snake_case = 1 - alpha_prod_t
_snake_case = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
_snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
_snake_case = model_output
else:
raise ValueError(F'Unsupported prediction_type {prediction_type}.' )
# 3. Clip "predicted x_0"
_snake_case = self.bit_scale
if self.config.clip_sample:
_snake_case = torch.clamp(__A , -scale , __A )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_snake_case = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
_snake_case = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_snake_case = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_snake_case = 0
if t > 0:
_snake_case = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=__A ).to(model_output.device )
_snake_case = (self._get_variance(__A , predicted_variance=__A ) ** 0.5) * noise
_snake_case = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=__A , pred_original_sample=__A )
class __UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1.0 , ):
"""simple docstring"""
super().__init__()
_snake_case = bit_scale
_snake_case = (
ddim_bit_scheduler_step if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
@torch.no_grad()
def __call__( self , lowerCAmelCase_ = 2_56 , lowerCAmelCase_ = 2_56 , lowerCAmelCase_ = 50 , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , **lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=lowerCAmelCase_ , )
_snake_case = decimal_to_bits(lowerCAmelCase_ ) * self.bit_scale
_snake_case = latents.to(self.device )
self.scheduler.set_timesteps(lowerCAmelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
_snake_case = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
_snake_case = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample
_snake_case = bits_to_decimal(lowerCAmelCase_ )
if output_type == "pil":
_snake_case = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
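
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). It is kept as
# comments because the module above is not importable as written, and the
# model/scheduler configuration shown here is an assumption:
#
#   unet = UNet2DConditionModel(sample_size=64, in_channels=3 * BITS, out_channels=3 * BITS)
#   scheduler = DDIMScheduler()
#   pipe = BitDiffusion(unet, scheduler, bit_scale=1.0)   # `BitDiffusion` stands in for the pipeline class above
#   images = pipe(height=64, width=64, num_inference_steps=50).images
# ---------------------------------------------------------------------------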
| 42 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=9 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=8 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.002 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]:
_a = parent
_a = batch_size
_a = encoder_seq_length
_a = decoder_seq_length
# For common tests
_a = self.decoder_seq_length
_a = is_training
_a = use_attention_mask
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = d_ff
_a = relative_attention_num_buckets
_a = dropout_rate
_a = initializer_factor
_a = eos_token_id
_a = pad_token_id
_a = decoder_start_token_id
_a = None
_a = decoder_layers
def _UpperCAmelCase ( self ) -> Dict:
return TaConfig.from_pretrained('''google/umt5-base''' )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]:
if attention_mask is None:
_a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCAmelCase )
if decoder_head_mask is None:
_a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase )
if cross_attn_head_mask is None:
_a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _UpperCAmelCase ( self ) -> Tuple:
_a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
_a = input_ids.clamp(self.pad_token_id + 1 )
_a = decoder_input_ids.clamp(self.pad_token_id + 1 )
_a = self.get_config()
_a = config.num_attention_heads
_a = self.prepare_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, input_dict
def _UpperCAmelCase ( self ) -> int:
_a , _a = self.prepare_config_and_inputs()
return config, inputs_dict
def _UpperCAmelCase ( self ) -> Tuple:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _UpperCAmelCase ( self ) -> List[str]:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Dict:
_a = UMTaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(
input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , )
_a = model(input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase )
_a = result.last_hidden_state
_a = result.past_key_values
_a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__UpperCAmelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[Any]:
_a = UMTaModel(config=__UpperCAmelCase ).get_decoder().to(__UpperCAmelCase ).eval()
# first forward pass
_a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
_a = model(__UpperCAmelCase )
_a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 )
_a , _a = outputs.to_tuple()
        # create a hypothetical next token and extend it into next_input_ids
_a = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append the new token to input_ids
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = model(__UpperCAmelCase )['''last_hidden_state''']
_a = model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )['''last_hidden_state''']
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -1, random_slice_idx].detach()
_a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , ) -> Union[str, Any]:
_a = UMTaModel(config=__UpperCAmelCase ).to(__UpperCAmelCase ).half().eval()
_a = model(**__UpperCAmelCase )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(__UpperCAmelCase ).any().item() )
@require_torch
class __lowerCamelCase ( a__ , a__ , a__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
A_ : Optional[Any] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
A_ : int = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
A_ : str = True
A_ : List[str] = False
A_ : List[Any] = False
A_ : str = True
A_ : List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
A_ : Optional[Any] = [0.8, 0.9]
def _UpperCAmelCase ( self ) -> Tuple:
_a = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def _UpperCAmelCase ( self ) -> int:
_a = self.model_tester.prepare_config_and_inputs()
_a = UMTaModel(config_and_inputs[0] ).to(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=__UpperCAmelCase , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
    @unittest.skipIf(torch_device == '''cpu''' , '''Can't do half precision''' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
        attention_names = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config ).eval()
        model.to(torch_device )
        head_masking = {
            '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=torch_device ),
            '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
            '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
        }
        for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['''decoder_head_mask'''] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=torch_device )
            out = model.generate(
                config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=True , return_dict_in_generate=True , **head_masks , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def _UpperCAmelCase ( self ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=__UpperCAmelCase ).to(__UpperCAmelCase )
_a = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=__UpperCAmelCase , legacy=__UpperCAmelCase )
_a = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
_a = tokenizer(__UpperCAmelCase , return_tensors='''pt''' , padding=__UpperCAmelCase ).input_ids
# fmt: off
_a = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__UpperCAmelCase , __UpperCAmelCase )
_a = model.generate(input_ids.to(__UpperCAmelCase ) )
_a = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
_a = tokenizer.batch_decode(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
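        # The hard-coded ids and strings above are reference tokenizations and generations
        # for google/umt5-small; the (currently skipped) test asserts they are reproduced.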
| 320 | 0 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a__ : List[str] = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a__ : List[str] = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a__ : List[Any] = False
a__ : int = False
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase=False) -> Union[str, Any]:
__UpperCamelCase :List[str] = super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase)
if return_labels:
if model_class in get_values(__lowercase):
__UpperCamelCase :Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa)
return inputs_dict
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=13 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=99 , __lowercase=32 , __lowercase=32 , __lowercase=2 , __lowercase=4 , __lowercase=37 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=512 , __lowercase=16 , __lowercase=2 , __lowercase=0.02 , __lowercase=3 , __lowercase=4 , __lowercase=None , ) -> Any:
__UpperCamelCase :List[Any] = parent
__UpperCamelCase :Any = batch_size
__UpperCamelCase :Dict = seq_length
__UpperCamelCase :List[str] = is_training
__UpperCamelCase :List[Any] = use_input_mask
__UpperCamelCase :Optional[int] = use_token_type_ids
__UpperCamelCase :Any = use_labels
__UpperCamelCase :List[str] = vocab_size
__UpperCamelCase :Any = hidden_size
__UpperCamelCase :Optional[int] = num_hidden_layers
__UpperCamelCase :Union[str, Any] = num_attention_heads
__UpperCamelCase :Dict = intermediate_size
__UpperCamelCase :str = hidden_act
__UpperCamelCase :List[Any] = hidden_dropout_prob
__UpperCamelCase :Union[str, Any] = attention_probs_dropout_prob
__UpperCamelCase :List[str] = max_position_embeddings
__UpperCamelCase :List[str] = type_vocab_size
__UpperCamelCase :Dict = type_sequence_label_size
__UpperCamelCase :str = initializer_range
__UpperCamelCase :int = num_labels
__UpperCamelCase :int = num_choices
__UpperCamelCase :List[str] = scope
__UpperCamelCase :Any = embedding_size
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCamelCase :str = None
if self.use_input_mask:
__UpperCamelCase :List[str] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCamelCase :Union[str, Any] = None
if self.use_token_type_ids:
__UpperCamelCase :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCamelCase :int = None
__UpperCamelCase :str = None
__UpperCamelCase :Any = None
if self.use_labels:
__UpperCamelCase :Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCamelCase :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCamelCase :Optional[Any] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCamelCase :str = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> str:
__UpperCamelCase :Dict = TFMobileBertModel(config=__lowercase)
__UpperCamelCase :List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__UpperCamelCase :Dict = model(__lowercase)
__UpperCamelCase :Union[str, Any] = [input_ids, input_mask]
__UpperCamelCase :Optional[Any] = model(__lowercase)
__UpperCamelCase :str = model(__lowercase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Dict:
__UpperCamelCase :int = TFMobileBertForMaskedLM(config=__lowercase)
__UpperCamelCase :Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__UpperCamelCase :Union[str, Any] = model(__lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Optional[Any]:
__UpperCamelCase :int = TFMobileBertForNextSentencePrediction(config=__lowercase)
__UpperCamelCase :Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__UpperCamelCase :Optional[int] = model(__lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> List[Any]:
__UpperCamelCase :List[str] = TFMobileBertForPreTraining(config=__lowercase)
__UpperCamelCase :int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__UpperCamelCase :Optional[Any] = model(__lowercase)
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Union[str, Any]:
__UpperCamelCase :Any = self.num_labels
__UpperCamelCase :Dict = TFMobileBertForSequenceClassification(config=__lowercase)
__UpperCamelCase :Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__UpperCamelCase :List[Any] = model(__lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> int:
__UpperCamelCase :Union[str, Any] = self.num_choices
__UpperCamelCase :Optional[Any] = TFMobileBertForMultipleChoice(config=__lowercase)
__UpperCamelCase :Dict = tf.tile(tf.expand_dims(__lowercase , 1) , (1, self.num_choices, 1))
__UpperCamelCase :Union[str, Any] = tf.tile(tf.expand_dims(__lowercase , 1) , (1, self.num_choices, 1))
__UpperCamelCase :Union[str, Any] = tf.tile(tf.expand_dims(__lowercase , 1) , (1, self.num_choices, 1))
__UpperCamelCase :Any = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__UpperCamelCase :Optional[int] = model(__lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Tuple:
__UpperCamelCase :Optional[int] = self.num_labels
__UpperCamelCase :Optional[int] = TFMobileBertForTokenClassification(config=__lowercase)
__UpperCamelCase :Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__UpperCamelCase :Optional[Any] = model(__lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> int:
__UpperCamelCase :int = TFMobileBertForQuestionAnswering(config=__lowercase)
__UpperCamelCase :List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__UpperCamelCase :Any = model(__lowercase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCamelCase__ ( self) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :List[Any] = TFMobileBertModelTest.TFMobileBertModelTester(self)
__UpperCamelCase :str = ConfigTester(self , config_class=__lowercase , hidden_size=37)
def UpperCamelCase__ ( self) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowercase)
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowercase)
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowercase)
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowercase)
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowercase)
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowercase)
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowercase)
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowercase)
@slow
def UpperCamelCase__ ( self) -> List[Any]:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
__UpperCamelCase :List[str] = TFMobileBertModel.from_pretrained(__lowercase)
self.assertIsNotNone(__lowercase)
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :Union[str, Any] = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''')
__UpperCamelCase :Any = tf.constant([[0, 1, 2, 3, 4, 5]])
__UpperCamelCase :int = model(__lowercase)[0]
__UpperCamelCase :Optional[Any] = [1, 6, 30_522]
self.assertEqual(output.shape , __lowercase)
__UpperCamelCase :Dict = tf.constant(
[
[
[-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
[-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
[-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
]
])
tf.debugging.assert_near(output[:, :3, :3] , __lowercase , atol=1E-4)
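        # The assertion above compares a 3x3 slice of the pretraining logits of
        # google/mobilebert-uncased against hard-coded reference values.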
| 43 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class __lowerCamelCase :
'''simple docstring'''
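    # Directed, weighted graph stored as an adjacency list: {u: [[weight, v], ...]}.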
def __init__( self ) -> Tuple:
_a = {}
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> int:
if self.graph.get(__UpperCAmelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_a = [[w, v]]
if not self.graph.get(__UpperCAmelCase ):
_a = []
def _UpperCAmelCase ( self ) -> int:
return list(self.graph )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]:
if s == d:
return []
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple:
if c == -1:
_a = floor(random() * 10000 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_a = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[str]:
_a = deque()
_a = []
if s == -2:
_a = list(self.graph )[0]
d.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
while d:
_a = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Tuple:
_a = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict:
return len(self.graph[u] )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple:
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
_a = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return sorted_nodes
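    # The next method collects the nodes that lie on a cycle, using an iterative DFS
    # with back-edge detection.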
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
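    # The next method reports whether the graph contains a cycle at all, using the same
    # back-edge detection as above but returning a boolean.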
def _UpperCAmelCase ( self ) -> Any:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]:
_a = time()
self.dfs(__UpperCAmelCase , __UpperCAmelCase )
_a = time()
return end - begin
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Optional[Any]:
_a = time()
self.bfs(__UpperCAmelCase )
_a = time()
return end - begin
class __lowerCamelCase :
'''simple docstring'''
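    # Undirected variant: every edge is inserted into both endpoints' adjacency lists.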
def __init__( self ) -> Optional[int]:
_a = {}
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> Dict:
        # check if vertex u already exists
if self.graph.get(__UpperCAmelCase ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_a = [[w, v]]
# add the other way
if self.graph.get(__UpperCAmelCase ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
_a = [[w, u]]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCAmelCase )
# the other way round
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Dict:
if s == d:
return []
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple:
if c == -1:
_a = floor(random() * 10000 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_a = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[Any]:
_a = deque()
_a = []
if s == -2:
_a = list(self.graph )[0]
d.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
while d:
_a = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict:
return len(self.graph[u] )
def _UpperCAmelCase ( self ) -> int:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return list(self.graph )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Tuple:
_a = time()
self.dfs(__UpperCAmelCase , __UpperCAmelCase )
_a = time()
return end - begin
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple:
_a = time()
self.bfs(__UpperCAmelCase )
_a = time()
return end - begin
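
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). The method defs above
# were renamed by the obfuscation, but the internal calls (self.add_pair, self.dfs,
# self.bfs) show the intended API; `Graph` stands in for the class name:
#
#   g = Graph()            # the undirected class defined above
#   g.add_pair(1, 2, 5)
#   g.add_pair(2, 3)
#   g.dfs(1, 3)            # -> [1, 2, 3]
#   g.bfs(1)               # -> [1, 2, 3]
# ---------------------------------------------------------------------------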
| 320 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
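# Solves the AC impedance triangle Z^2 = R^2 + X^2 for whichever of the three
# quantities (resistance R, reactance X, impedance Z) is passed as 0.
# Worked example: with R=3, X=4 and the impedance argument set to 0, the function
# returns {"impedance": 5.0}, since sqrt(3**2 + 4**2) == 5.0.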
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : float ,_lowerCamelCase : float ) -> dict[str, float]:
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance == 0:
return {"resistance": sqrt(pow(_lowerCamelCase ,2 ) - pow(_lowerCamelCase ,2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(_lowerCamelCase ,2 ) - pow(_lowerCamelCase ,2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(_lowerCamelCase ,2 ) + pow(_lowerCamelCase ,2 ) )}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : Dict = 'unispeech'
def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase=320 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=100 , __UpperCAmelCase=256 , __UpperCAmelCase=256 , __UpperCAmelCase=0.1 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=80 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=0.5 , **__UpperCAmelCase , ) -> Union[str, Any]:
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
_a = hidden_size
_a = feat_extract_norm
_a = feat_extract_activation
_a = list(__UpperCAmelCase )
_a = list(__UpperCAmelCase )
_a = list(__UpperCAmelCase )
_a = conv_bias
_a = num_conv_pos_embeddings
_a = num_conv_pos_embedding_groups
_a = len(self.conv_dim )
_a = num_hidden_layers
_a = intermediate_size
_a = hidden_act
_a = num_attention_heads
_a = hidden_dropout
_a = attention_dropout
_a = activation_dropout
_a = feat_proj_dropout
_a = final_dropout
_a = layerdrop
_a = layer_norm_eps
_a = initializer_range
_a = num_ctc_classes
_a = vocab_size
_a = do_stable_layer_norm
_a = use_weighted_layer_sum
_a = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a = apply_spec_augment
_a = mask_time_prob
_a = mask_time_length
_a = mask_time_min_masks
_a = mask_feature_prob
_a = mask_feature_length
_a = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_a = num_codevectors_per_group
_a = num_codevector_groups
_a = contrastive_logits_temperature
_a = feat_quantizer_dropout
_a = num_negatives
_a = codevector_dim
_a = proj_codevector_dim
_a = diversity_loss_weight
# ctc loss
_a = ctc_loss_reduction
_a = ctc_zero_infinity
# pretraining loss
_a = replace_prob
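    # Overall downsampling factor of the convolutional feature encoder: the product of
    # all `conv_stride` values.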
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 320 | 0 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger()
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : nn.Module
__UpperCAmelCase : List[nn.Module] = field(default_factory=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase : list = field(default_factory=__SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = len(list(m.modules() ) ) == 1 or isinstance(_a , nn.Convad ) or isinstance(_a , nn.BatchNormad )
if has_not_submodules:
self.traced.append(_a )
def __call__( self , _a ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(_a )
[x.remove() for x in self.handles]
return self
@property
def __UpperCAmelCase ( self ):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda _a : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : nn.Module
__UpperCAmelCase : nn.Module
__UpperCAmelCase : int = 0
__UpperCAmelCase : List = field(default_factory=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase : List = field(default_factory=__SCREAMING_SNAKE_CASE )
def __call__( self , _a ):
__a = Tracker(self.dest )(_a ).parametrized
__a = Tracker(self.src )(_a ).parametrized
__a = list(filter(lambda _a : type(_a ) not in self.src_skip , _a ) )
__a = list(filter(lambda _a : type(_a ) not in self.dest_skip , _a ) )
if len(_a ) != len(_a ):
raise Exception(
f'''Numbers of operations are different. Source module has {len(_a )} operations while'''
f''' destination module has {len(_a )}.''' )
for dest_m, src_m in zip(_a , _a ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'''Transfered from={src_m} to={dest_m}''' )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : ResNetConfig , lowerCAmelCase__ : Path , lowerCAmelCase__ : bool = True ) -> Tuple:
print(f'''Converting {name}...''' )
with torch.no_grad():
__a = timm.create_model(lowerCAmelCase__ , pretrained=lowerCAmelCase__ ).eval()
__a = ResNetForImageClassification(lowerCAmelCase__ ).eval()
__a = ModuleTransfer(src=lowerCAmelCase__ , dest=lowerCAmelCase__ )
__a = torch.randn((1, 3, 224, 224) )
module_transfer(lowerCAmelCase__ )
assert torch.allclose(from_model(lowerCAmelCase__ ) , our_model(lowerCAmelCase__ ).logits ), "The model logits don't match the original one."
__a = f'''resnet{'-'.join(name.split('resnet' ) )}'''
print(lowerCAmelCase__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=lowerCAmelCase__ , )
# we can use the convnext one
__a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=lowerCAmelCase__ , )
print(f'''Pushed {checkpoint_name}''' )
def lowercase ( lowerCAmelCase__ : Path , lowerCAmelCase__ : str = None , lowerCAmelCase__ : bool = True ) -> Optional[Any]:
__a = '''imagenet-1k-id2label.json'''
__a = 1000
__a = (1, num_labels)
__a = '''huggingface/label-files'''
__a = num_labels
__a = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type='''dataset''' ) , '''r''' ) )
__a = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
__a = partial(lowerCAmelCase__ , num_labels=lowerCAmelCase__ , idalabel=lowerCAmelCase__ , labelaid=lowerCAmelCase__ )
__a = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(lowerCAmelCase__ , names_to_config[model_name] , lowerCAmelCase__ , lowerCAmelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return config, expected_shape
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
lowercase_ = parser.parse_args()
lowercase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
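
# Example invocation sketch (the script file name and output directory are illustrative):
#   python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./converted_resnet
# Leaving out --model_name converts every architecture listed in `names_to_config`.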
| 45 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__snake_case = None
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
__snake_case = {
'''google/rembert''': 256,
}
__snake_case = '''▁'''
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : Optional[Any] = VOCAB_FILES_NAMES
A_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : List[Any] = RemBertTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , **__UpperCAmelCase , ) -> List[Any]:
# Mask token behave like a normal word, i.e. include the space before it
_a = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , )
_a = do_lower_case
_a = remove_space
_a = keep_accents
_a = vocab_file
_a = False if not self.vocab_file else True
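    # Sequences are assembled ALBERT/RemBERT-style: `[CLS] A [SEP]` for a single sequence
    # and `[CLS] A [SEP] B [SEP]` for a pair.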
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(__UpperCAmelCase ) )
return
_a = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
return (out_vocab_file,)
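# Usage sketch: upstream these four methods are build_inputs_with_special_tokens,
# get_special_tokens_mask, create_token_type_ids_from_sequences and save_vocabulary on
# RemBertTokenizerFast. Assuming the google/rembert checkpoint is available locally, a sequence
# pair is wrapped as [CLS] A [SEP] B [SEP]:
#   tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
#   ids = tokenizer.build_inputs_with_special_tokens([10, 11], [12, 13])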
| 320 | 0 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
        # Model for Image classification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class lowercase ( _BaseAutoModelClass ):
_SCREAMING_SNAKE_CASE = FLAX_MODEL_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModel)
class lowercase ( _BaseAutoModelClass ):
_SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_PRETRAINING_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class lowercase ( _BaseAutoModelClass ):
_SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class lowercase ( _BaseAutoModelClass ):
_SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_MASKED_LM_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class lowercase ( _BaseAutoModelClass ):
_SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class lowercase ( _BaseAutoModelClass ):
_SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class lowercase ( _BaseAutoModelClass ):
_SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class lowercase ( _BaseAutoModelClass ):
_SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class lowercase ( _BaseAutoModelClass ):
_SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class lowercase ( _BaseAutoModelClass ):
_SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class lowercase ( _BaseAutoModelClass ):
_SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class lowercase ( _BaseAutoModelClass ):
_SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class lowercase ( _BaseAutoModelClass ):
_SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
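# Usage sketch (assumes flax is installed; the checkpoint name is only illustrative): each auto class
# looks up the concrete Flax architecture for a checkpoint's config type in the mappings above, e.g.
#   from transformers import FlaxAutoModelForSequenceClassification
#   model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-cased")
# resolves to FlaxBertForSequenceClassification because the config's model_type is "bert".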
| 46 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
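# Usage sketch: thanks to the _LazyModule indirection above, `from transformers.models.reformer
# import ReformerModel` only triggers the heavy modeling import on first attribute access; names
# whose optional backend (sentencepiece, tokenizers, torch) is unavailable are simply not
# registered in the import structure.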
| 320 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _lowerCAmelCase ( _UpperCamelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =botoa.client('iam' )
_SCREAMING_SNAKE_CASE ={
'Version': '2012-10-17',
'Statement': [
{'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=_UpperCamelCase , AssumeRolePolicyDocument=json.dumps(_UpperCamelCase , indent=2 ) )
_SCREAMING_SNAKE_CASE ={
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': [
'sagemaker:*',
'ecr:GetDownloadUrlForLayer',
'ecr:BatchGetImage',
'ecr:BatchCheckLayerAvailability',
'ecr:GetAuthorizationToken',
'cloudwatch:PutMetricData',
'cloudwatch:GetMetricData',
'cloudwatch:GetMetricStatistics',
'cloudwatch:ListMetrics',
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:DescribeLogStreams',
'logs:PutLogEvents',
'logs:GetLogEvents',
's3:CreateBucket',
's3:ListBucket',
's3:GetBucketLocation',
's3:GetObject',
's3:PutObject',
],
'Resource': '*',
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=_UpperCamelCase , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(_UpperCamelCase , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
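# Note: the trust policy above lets the SageMaker service assume the newly created role, while the
# inline permission policy grants it the SageMaker, ECR, CloudWatch, logs and S3 actions a training
# job needs.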
def _lowerCAmelCase ( _UpperCamelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =botoa.client('iam' )
return iam_client.get_role(RoleName=_UpperCamelCase )["Role"]["Arn"]
def _lowerCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =_ask_options(
'How do you want to authorize?' , ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '] , _UpperCamelCase , )
_SCREAMING_SNAKE_CASE =None
if credentials_configuration == 0:
_SCREAMING_SNAKE_CASE =_ask_field('Enter your AWS Profile name: [default] ' , default='default' )
_SCREAMING_SNAKE_CASE =aws_profile
else:
print(
            'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with '
            '`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`' )
_SCREAMING_SNAKE_CASE =_ask_field('AWS Access Key ID: ' )
_SCREAMING_SNAKE_CASE =aws_access_key_id
_SCREAMING_SNAKE_CASE =_ask_field('AWS Secret Access Key: ' )
_SCREAMING_SNAKE_CASE =aws_secret_access_key
_SCREAMING_SNAKE_CASE =_ask_field('Enter your AWS Region: [us-east-1]' , default='us-east-1' )
_SCREAMING_SNAKE_CASE =aws_region
_SCREAMING_SNAKE_CASE =_ask_options(
'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?' , ['Provide IAM Role name', 'Create new IAM role using credentials'] , _UpperCamelCase , )
if role_management == 0:
_SCREAMING_SNAKE_CASE =_ask_field('Enter your IAM role name: ' )
else:
_SCREAMING_SNAKE_CASE ='accelerate_sagemaker_execution_role'
        print(f"Accelerate will create an IAM role \"{iam_role_name}\" using the provided credentials" )
_create_iam_role_for_sagemaker(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =_ask_field(
'Do you want to use custom Docker image? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , )
_SCREAMING_SNAKE_CASE =None
if is_custom_docker_image:
_SCREAMING_SNAKE_CASE =_ask_field('Enter your Docker image: ' , lambda _UpperCamelCase : str(_UpperCamelCase ).lower() )
_SCREAMING_SNAKE_CASE =_ask_field(
'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , )
_SCREAMING_SNAKE_CASE =None
if is_sagemaker_inputs_enabled:
_SCREAMING_SNAKE_CASE =_ask_field(
'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ' , lambda _UpperCamelCase : str(_UpperCamelCase ).lower() , )
_SCREAMING_SNAKE_CASE =_ask_field(
'Do you want to enable SageMaker metrics? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , )
_SCREAMING_SNAKE_CASE =None
if is_sagemaker_metrics_enabled:
_SCREAMING_SNAKE_CASE =_ask_field(
'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ' , lambda _UpperCamelCase : str(_UpperCamelCase ).lower() , )
_SCREAMING_SNAKE_CASE =_ask_options(
'What is the distributed mode?' , ['No distributed training', 'Data parallelism'] , _convert_sagemaker_distributed_mode , )
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =_ask_field(
        'Do you wish to optimize your script with torch dynamo? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , )
if use_dynamo:
_SCREAMING_SNAKE_CASE ='dynamo_'
_SCREAMING_SNAKE_CASE =_ask_options(
'Which dynamo backend would you like to use?' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
_SCREAMING_SNAKE_CASE =_ask_field(
'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , )
if use_custom_options:
_SCREAMING_SNAKE_CASE =_ask_options(
'Which mode do you want to use?' , _UpperCamelCase , lambda _UpperCamelCase : TORCH_DYNAMO_MODES[int(_UpperCamelCase )] , default='default' , )
_SCREAMING_SNAKE_CASE =_ask_field(
                'Do you want to use fullgraph mode, or is it OK to break the model into several subgraphs? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , )
_SCREAMING_SNAKE_CASE =_ask_field(
'Do you want to enable dynamic shape tracing? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , )
_SCREAMING_SNAKE_CASE ='Which EC2 instance type you want to use for your training?'
if distributed_type != SageMakerDistributedType.NO:
_SCREAMING_SNAKE_CASE =_ask_options(
_UpperCamelCase , _UpperCamelCase , lambda _UpperCamelCase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(_UpperCamelCase )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
_SCREAMING_SNAKE_CASE =_ask_field(_UpperCamelCase , lambda _UpperCamelCase : str(_UpperCamelCase ).lower() , default='ml.p3.2xlarge' )
_SCREAMING_SNAKE_CASE =1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
_SCREAMING_SNAKE_CASE =_ask_field(
'How many machines do you want use? [1]: ' , _UpperCamelCase , default=1 , )
_SCREAMING_SNAKE_CASE =_ask_options(
'Do you wish to use FP16 or BF16 (mixed precision)?' , ['no', 'fp16', 'bf16', 'fp8'] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' )
return SageMakerConfig(
image_uri=_UpperCamelCase , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=_UpperCamelCase , use_cpu=_UpperCamelCase , dynamo_config=_UpperCamelCase , eca_instance_type=_UpperCamelCase , profile=_UpperCamelCase , region=_UpperCamelCase , iam_role_name=_UpperCamelCase , mixed_precision=_UpperCamelCase , num_machines=_UpperCamelCase , sagemaker_inputs_file=_UpperCamelCase , sagemaker_metrics_file=_UpperCamelCase , )
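# Rough sketch of what the interactive flow above produces (upstream this function is accelerate's
# get_sagemaker_input; the values shown are the defaults offered by the prompts, not a definitive
# configuration):
#   SageMakerConfig(
#       compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
#       distributed_type=SageMakerDistributedType.NO,
#       eca_instance_type="ml.p3.2xlarge",
#       region="us-east-1",
#       iam_role_name="accelerate_sagemaker_execution_role",
#       num_machines=1,
#       mixed_precision="no",
#   )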
| 47 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowerCamelCase ( a__ ):
'''simple docstring'''
@require_torch
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_a = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(__UpperCAmelCase )
BertModel.from_pretrained(__UpperCAmelCase )
BertTokenizer.from_pretrained(__UpperCAmelCase )
pipeline(task='''fill-mask''' , model=__UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_a = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_a = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(__UpperCAmelCase )
BertModel.from_pretrained(__UpperCAmelCase )
BertTokenizer.from_pretrained(__UpperCAmelCase )
pipeline(task='''fill-mask''' , model=__UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
_a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
_a = '''
from transformers import pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
_a = self.get_env()
_a = '''1'''
_a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
_a = '''
from transformers import AutoModel
'''
_a = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
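# Minimal sketch of the pattern these tests exercise (env var and model id taken from the cases
# above; `os` would need to be imported):
#   env = dict(os.environ, TRANSFORMERS_OFFLINE="1")
#   cmd = [sys.executable, "-c",
#          "from transformers import BertConfig; "
#          "BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert'); print('success')"]
#   subprocess.run(cmd, env=env, check=True)
# With the files already cached, the child process must succeed without touching the network.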
| 320 | 0 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> Optional[int]:
require_version(deps[pkg] ,_SCREAMING_SNAKE_CASE )
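# Usage sketch: upstream this helper is dep_version_check(pkg, hint=None); a call such as
#   dep_version_check("tokenizers")
# re-validates the single pinned range from dependency_versions_table at runtime via require_version.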
| 48 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : str = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Any = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Dict = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Tuple = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Any = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(cls , ['''flax'''] )
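# Dummy-object pattern: these placeholders keep top-level imports working when flax is not
# installed; instantiating one, or calling either of its classmethods, immediately raises an
# ImportError through requires_backends that points the user at the missing backend.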
| 320 | 0 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _A :
UpperCamelCase__ : Optional[Union[str, Path]] = None
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : Optional[Dict] = None
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = True
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : int = 1
UpperCamelCase__ : Optional[Union[str, bool]] = None
UpperCamelCase__ : bool = False
UpperCamelCase__ : Optional[Dict] = None
UpperCamelCase__ : Optional[str] = None
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 49 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__snake_case = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
author={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__snake_case = '''\
WIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.
It can be used to evaluate the quality of machine-generated texts.
'''
__snake_case = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
    def remove_articles(_lowerCAmelCase : Optional[int] ):
        _a = re.compile(R'''\b(a|an|the)\b''', re.UNICODE )
        return re.sub(_a, ''' ''', _lowerCAmelCase )
    def white_space_fix(_lowerCAmelCase : Tuple ):
        return " ".join(_lowerCAmelCase.split() )
    def remove_punc(_lowerCAmelCase : Tuple ):
        _a = set(string.punctuation )
        return "".join(ch for ch in _lowerCAmelCase if ch not in _a )
    def lower(_lowerCAmelCase : List[Any] ):
        return _lowerCAmelCase.lower()
    return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) )
def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) )
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Any ):
"""simple docstring"""
_a = [any(compute_exact(_lowerCAmelCase, _lowerCAmelCase ) for ref in refs ) for pred, refs in zip(_lowerCAmelCase, _lowerCAmelCase )]
return (sum(_lowerCAmelCase ) / len(_lowerCAmelCase )) * 1_00
def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : List[Any], _lowerCAmelCase : str, _lowerCAmelCase : str ):
"""simple docstring"""
_a = [rgram for rgrams in rgramslist for rgram in rgrams]
_a = Counter(_lowerCAmelCase )
_a = Counter(_lowerCAmelCase )
_a = Counter()
for sgram, scount in sgramcounter.items():
_a = scount * numref
_a = Counter(_lowerCAmelCase )
_a = Counter()
for cgram, ccount in cgramcounter.items():
_a = ccount * numref
# KEEP
_a = sgramcounter_rep & cgramcounter_rep
_a = keepgramcounter_rep & rgramcounter
_a = sgramcounter_rep & rgramcounter
_a = 0
_a = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(_lowerCAmelCase ) > 0:
_a = keeptmpscorea / len(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_a = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_a = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_a = sgramcounter_rep - cgramcounter_rep
_a = delgramcounter_rep - rgramcounter
_a = sgramcounter_rep - rgramcounter
_a = 0
_a = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
if len(_lowerCAmelCase ) > 0:
_a = deltmpscorea / len(_lowerCAmelCase )
# ADDITION
_a = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
_a = set(_lowerCAmelCase ) & set(_lowerCAmelCase )
_a = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
_a = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(_lowerCAmelCase ) > 0:
_a = addtmpscore / len(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
_a = addtmpscore / len(_lowerCAmelCase )
_a = 0
if addscore_precision > 0 or addscore_recall > 0:
_a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Dict, _lowerCAmelCase : Any ):
"""simple docstring"""
_a = len(_lowerCAmelCase )
_a = ssent.split(''' ''' )
_a = csent.split(''' ''' )
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
for rsent in rsents:
_a = rsent.split(''' ''' )
_a = []
_a = []
_a = []
ragramslist.append(_lowerCAmelCase )
for i in range(0, len(_lowerCAmelCase ) - 1 ):
if i < len(_lowerCAmelCase ) - 1:
_a = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 2:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 3:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(_lowerCAmelCase )
ragramslist.append(_lowerCAmelCase )
ragramslist.append(_lowerCAmelCase )
ragramslist.append(_lowerCAmelCase )
for i in range(0, len(_lowerCAmelCase ) - 1 ):
if i < len(_lowerCAmelCase ) - 1:
_a = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 2:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 3:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(_lowerCAmelCase )
for i in range(0, len(_lowerCAmelCase ) - 1 ):
if i < len(_lowerCAmelCase ) - 1:
_a = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 2:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 3:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(_lowerCAmelCase )
((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
_a = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_a = sum([delascore, delascore, delascore, delascore] ) / 4
_a = sum([addascore, addascore, addascore, addascore] ) / 4
_a = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : bool = True, _lowerCAmelCase : str = "13a", _lowerCAmelCase : bool = True ):
"""simple docstring"""
if lowercase:
_a = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_a = sacrebleu.metrics.bleu._get_tokenizer(_lowerCAmelCase )()(_lowerCAmelCase )
else:
_a = sacrebleu.TOKENIZERS[tokenizer]()(_lowerCAmelCase )
elif tokenizer == "moses":
_a = sacremoses.MosesTokenizer().tokenize(_lowerCAmelCase, return_str=_lowerCAmelCase, escape=_lowerCAmelCase )
elif tokenizer == "penn":
_a = sacremoses.MosesTokenizer().penn_tokenize(_lowerCAmelCase, return_str=_lowerCAmelCase )
else:
_a = sentence
if not return_str:
_a = normalized_sent.split()
return normalized_sent
def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
if not (len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == len(_lowerCAmelCase )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
_a = 0
for src, pred, refs in zip(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ):
sari_score += SARIsent(normalize(_lowerCAmelCase ), normalize(_lowerCAmelCase ), [normalize(_lowerCAmelCase ) for sent in refs] )
_a = sari_score / len(_lowerCAmelCase )
return 1_00 * sari_score
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Tuple, _lowerCAmelCase : Any="exp", _lowerCAmelCase : Tuple=None, _lowerCAmelCase : Union[str, Any]=False, _lowerCAmelCase : Optional[Any]=False, _lowerCAmelCase : List[str]=False, ):
"""simple docstring"""
_a = len(references[0] )
if any(len(_lowerCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_a = [[refs[i] for refs in references] for i in range(_lowerCAmelCase )]
_a = sacrebleu.corpus_bleu(
_lowerCAmelCase, _lowerCAmelCase, smooth_method=_lowerCAmelCase, smooth_value=_lowerCAmelCase, force=_lowerCAmelCase, lowercase=_lowerCAmelCase, use_effective_order=_lowerCAmelCase, )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
_a = {}
result.update({'''sari''': compute_sari(sources=__UpperCAmelCase , predictions=__UpperCAmelCase , references=__UpperCAmelCase )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=__UpperCAmelCase , references=__UpperCAmelCase )} )
result.update({'''exact''': compute_em(predictions=__UpperCAmelCase , references=__UpperCAmelCase )} )
return result
| 320 | 0 |
from timeit import timeit
_UpperCAmelCase : Union[str, Any] = {
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> bool:
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : Dict = len(_UpperCAmelCase ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> bool:
lowerCamelCase__ : Dict = len(_UpperCAmelCase ) // 2
lowerCamelCase__ : Tuple = len(_UpperCAmelCase )
    # We only need to traverse up to half of the string, because the i-th
    # character from the end is reachable via index n - i - 1.
    # e.g. for [0,1,2,3,4,5], index 4 pairs with index 1 (i == n - i - 1),
    # where n is the length of the string.
return all(s[i] == s[n - i - 1] for i in range(_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> bool:
if len(_UpperCAmelCase ) <= 2:
return True
if s[0] == s[len(_UpperCAmelCase ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> bool:
return s == s[::-1]
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> None:
lowerCamelCase__ : Any = F"""all({name}(key) is value for key, value in test_data.items())"""
lowerCamelCase__ : List[Any] = F"""from __main__ import test_data, {name}"""
lowerCamelCase__ : int = 50_0000
lowerCamelCase__ : List[str] = timeit(stmt=_UpperCAmelCase , setup=_UpperCAmelCase , number=_UpperCAmelCase )
print(F"""{name:<35} finished {number:,} runs in {result:.5f} seconds""" )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"""{key:21} {value}""")
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""")
| 50 |
"""simple docstring"""
def A_ ( _lowerCAmelCase : int = 50 ):
"""simple docstring"""
_a = [1] * (length + 1)
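    # Dynamic programming over row lengths: ways_number[n] counts the ways to place blocks of
    # length >= 3, separated by at least one empty cell, in a row of length n (placing no block at
    # all counts as 1, hence the initialisation above). For each admissible block length and start
    # position we add the count for the remaining suffix (one cell after the block is reserved as a
    # separator); the trailing `+= 1` covers a block placed flush against the end of the row.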
for row_length in range(3, length + 1 ):
for block_length in range(3, row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
| 320 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : Tuple = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __snake_case ( a ):
UpperCAmelCase__ : List[Any] = '''sew-d'''
def __init__( self : List[str] , _snake_case : Tuple=32 , _snake_case : Dict=768 , _snake_case : str=12 , _snake_case : Optional[int]=12 , _snake_case : Optional[int]=3072 , _snake_case : str=2 , _snake_case : Tuple=512 , _snake_case : Optional[Any]=256 , _snake_case : Tuple=True , _snake_case : Dict=True , _snake_case : Optional[Any]=("p2c", "c2p") , _snake_case : int="layer_norm" , _snake_case : Any="gelu_python" , _snake_case : Any=0.1 , _snake_case : Any=0.1 , _snake_case : List[str]=0.1 , _snake_case : List[str]=0.0 , _snake_case : Optional[int]=0.1 , _snake_case : int=0.0_2 , _snake_case : Dict=1e-7 , _snake_case : str=1e-5 , _snake_case : Optional[Any]="group" , _snake_case : List[str]="gelu" , _snake_case : Union[str, Any]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _snake_case : int=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _snake_case : Dict=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _snake_case : int=False , _snake_case : List[str]=128 , _snake_case : Optional[int]=16 , _snake_case : int=True , _snake_case : List[Any]=0.0_5 , _snake_case : str=10 , _snake_case : Any=2 , _snake_case : Tuple=0.0 , _snake_case : Tuple=10 , _snake_case : str=0 , _snake_case : List[Any]="mean" , _snake_case : str=False , _snake_case : List[Any]=False , _snake_case : Dict=256 , _snake_case : Union[str, Any]=0 , _snake_case : Union[str, Any]=1 , _snake_case : Optional[Any]=2 , **_snake_case : Dict , ):
"""simple docstring"""
super().__init__(**_snake_case , pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case)
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = feat_extract_norm
UpperCAmelCase_ = feat_extract_activation
UpperCAmelCase_ = list(_snake_case)
UpperCAmelCase_ = list(_snake_case)
UpperCAmelCase_ = list(_snake_case)
UpperCAmelCase_ = conv_bias
UpperCAmelCase_ = num_conv_pos_embeddings
UpperCAmelCase_ = num_conv_pos_embedding_groups
UpperCAmelCase_ = len(self.conv_dim)
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = squeeze_factor
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = position_buckets
UpperCAmelCase_ = share_att_key
UpperCAmelCase_ = relative_attention
UpperCAmelCase_ = norm_rel_ebd
UpperCAmelCase_ = list(_snake_case)
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = activation_dropout
UpperCAmelCase_ = feat_proj_dropout
UpperCAmelCase_ = final_dropout
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = feature_layer_norm_eps
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
                '''Configuration for convolutional layers is incorrect. '''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"""
F"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ = apply_spec_augment
UpperCAmelCase_ = mask_time_prob
UpperCAmelCase_ = mask_time_length
UpperCAmelCase_ = mask_time_min_masks
UpperCAmelCase_ = mask_feature_prob
UpperCAmelCase_ = mask_feature_length
UpperCAmelCase_ = mask_feature_min_masks
# ctc loss
UpperCAmelCase_ = ctc_loss_reduction
UpperCAmelCase_ = ctc_zero_infinity
# sequence classification
UpperCAmelCase_ = use_weighted_layer_sum
UpperCAmelCase_ = classifier_proj_size
@property
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1)
| 51 |
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__snake_case = logging.get_logger('''transformers.models.speecht5''')
__snake_case = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
__snake_case = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
__snake_case = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
__snake_case = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
__snake_case = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
__snake_case = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
__snake_case = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
__snake_case = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
__snake_case = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__snake_case = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__snake_case = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__snake_case = []
__snake_case = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
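# Descriptive note (inferred from the dictionaries above): each MAPPING_* dict maps a
# fairseq parameter prefix to the corresponding HF SpeechT5 module path, with '*'
# standing in for the numeric layer index; the IGNORE_KEYS_* lists name fairseq
# weights that are skipped when converting for that particular task.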
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the attribute of `hf_pointer` addressed by `key` and `weight_type`."""
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def should_ignore(name, ignore_keys):
    """Return True if `name` matches one of the (possibly wildcarded) `ignore_keys` patterns."""
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
            prefix , suffix = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def recursively_load_weights(fairseq_dict, hf_model, task):
"""simple docstring"""
_a = []
if task == "s2t":
_a = hf_model.speechta.encoder.prenet.feature_encoder
_a = MAPPING_S2T
_a = IGNORE_KEYS_S2T
elif task == "t2s":
_a = None
_a = MAPPING_T2S
_a = IGNORE_KEYS_T2S
elif task == "s2s":
_a = hf_model.speechta.encoder.prenet.feature_encoder
_a = MAPPING_S2S
_a = IGNORE_KEYS_S2S
else:
raise ValueError(f'Unsupported task: {task}' )
for name, value in fairseq_dict.items():
if should_ignore(_lowerCAmelCase, _lowerCAmelCase ):
logger.info(f'{name} was ignored' )
continue
_a = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, hf_model.config.feat_extract_norm == '''group''', )
_a = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
_a , _a = key.split('''.*.''' )
if prefix in name and suffix in name:
_a = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
_a = True
if "*" in mapped_key:
_a = name.split(_lowerCAmelCase )[0].split('''.''' )[-2]
_a = mapped_key.replace('''*''', _lowerCAmelCase )
if "weight_g" in name:
_a = '''weight_g'''
elif "weight_v" in name:
_a = '''weight_v'''
elif "bias" in name:
_a = '''bias'''
elif "weight" in name:
_a = '''weight'''
elif "running_mean" in name:
_a = '''running_mean'''
elif "running_var" in name:
_a = '''running_var'''
elif "num_batches_tracked" in name:
_a = '''num_batches_tracked'''
else:
_a = None
set_recursively(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
"""simple docstring"""
_a = full_name.split('''conv_layers.''' )[-1]
_a = name.split('''.''' )
_a = int(items[0] )
_a = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
_a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
_a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
_a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
_a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def convert_speechta_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None):
"""simple docstring"""
if config_path is not None:
_a = SpeechTaConfig.from_pretrained(_lowerCAmelCase )
else:
_a = SpeechTaConfig()
if task == "s2t":
_a = config.max_text_positions
_a = SpeechTaForSpeechToText(_lowerCAmelCase )
elif task == "t2s":
_a = 18_76
_a = 6_00
_a = config.max_speech_positions
_a = SpeechTaForTextToSpeech(_lowerCAmelCase )
elif task == "s2s":
_a = 18_76
_a = config.max_speech_positions
_a = SpeechTaForSpeechToSpeech(_lowerCAmelCase )
else:
raise ValueError(f'Unknown task name: {task}' )
if vocab_path:
_a = SpeechTaTokenizer(_lowerCAmelCase, model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
_a = AddedToken('''<mask>''', lstrip=_lowerCAmelCase, rstrip=_lowerCAmelCase )
_a = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
_a = SpeechTaFeatureExtractor()
_a = SpeechTaProcessor(tokenizer=_lowerCAmelCase, feature_extractor=_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
_a = torch.load(_lowerCAmelCase )
recursively_load_weights(fairseq_checkpoint['''model'''], _lowerCAmelCase, _lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(_lowerCAmelCase )
model.push_to_hub(_lowerCAmelCase )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 320 | 0 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)
    # fill in each value of the kernel
    for y in range(ksize):
        for x in range(ksize):
            # distance from the kernel center
            px = x - ksize // 2
            py = y - ksize // 2
            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # rotate coordinates into the kernel orientation
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill the kernel with the Gabor function value
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
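# Quick usage sketch with hypothetical parameters: gabor_filter_kernel(9, 8, 45, 10, 0.5, 0)
# returns a 9x9 float64 array; even sizes are bumped to the next odd value, so the
# ksize=10 call in the demo below actually produces an 11x11 kernel.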
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("../image_data/lena.jpg")
    # turn the image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[Any] = 'decision_transformer'
A_ : Union[str, Any] = ['past_key_values']
A_ : str = {
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
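    # Descriptive note: common Transformers attribute names are aliased to the
    # GPT-2-style names (n_positions, n_head, n_layer) because the model reuses a
    # GPT-2 transformer backbone internally.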
def __init__( self , __UpperCAmelCase=17 , __UpperCAmelCase=4 , __UpperCAmelCase=128 , __UpperCAmelCase=4096 , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=1024 , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=None , __UpperCAmelCase="relu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Optional[int]:
_a = state_dim
_a = act_dim
_a = hidden_size
_a = max_ep_len
_a = action_tanh
_a = vocab_size
_a = n_positions
_a = n_layer
_a = n_head
_a = n_inner
_a = activation_function
_a = resid_pdrop
_a = embd_pdrop
_a = attn_pdrop
_a = layer_norm_epsilon
_a = initializer_range
_a = scale_attn_weights
_a = use_cache
_a = scale_attn_by_inverse_layer_idx
_a = reorder_and_upcast_attn
_a = bos_token_id
_a = eos_token_id
super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
| 320 | 0 |
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using the Rabin-Karp rolling hash."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
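# Average cost is O(len(pattern) + len(text)): each window reuses the previous hash
# via the rolling update above instead of rehashing the whole substring; the explicit
# substring comparison only runs on hash matches.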
def test_rabin_karp() -> None:
    # Test 1)
    pattern = 'abc1abc12'
    text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text2 = 'alskfjaldsk23adsfabcabc'
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = 'Lü'
    text = 'Lüsai'
    assert rabin_karp(pattern, text)
    pattern = 'Lue'
    assert not rabin_karp(pattern, text)
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
| 53 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__snake_case = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[str] = ['pixel_values']
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> None:
super().__init__(**__UpperCAmelCase )
_a = size if size is not None else {'''shortest_edge''': 224}
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
_a = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' )
_a = do_resize
_a = size
_a = resample
_a = do_center_crop
_a = crop_size
_a = do_rescale
_a = rescale_factor
_a = do_normalize
_a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_a = image_std if image_std is not None else OPENAI_CLIP_STD
_a = do_convert_rgb
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_a = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase )
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
_a = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Optional[Any]:
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image:
_a = do_resize if do_resize is not None else self.do_resize
_a = size if size is not None else self.size
_a = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase )
_a = resample if resample is not None else self.resample
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase )
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_a = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_a = [convert_to_rgb(__UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
_a = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
_a = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
if do_rescale:
_a = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_normalize:
_a = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images]
_a = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
| 320 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
a__ : Optional[int] = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[int] = BartphoTokenizer
snake_case__ : Union[str, Any] = False
snake_case__ : Optional[int] = True
def UpperCAmelCase_ ( self : List[str] ) -> int:
super().setUp()
__SCREAMING_SNAKE_CASE = ["▁This", "▁is", "▁a", "▁t", "est"]
__SCREAMING_SNAKE_CASE = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE = {"unk_token": "<unk>"}
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""" )
__SCREAMING_SNAKE_CASE = BartphoTokenizer(UpperCAmelCase__ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self : str , **UpperCAmelCase__ : int ) -> int:
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE = "This is a là test"
__SCREAMING_SNAKE_CASE = "This is a<unk><unk> test"
return input_text, output_text
def UpperCAmelCase_ ( self : List[str] ) -> str:
__SCREAMING_SNAKE_CASE = BartphoTokenizer(UpperCAmelCase__ , self.monolingual_vocab_file , **self.special_tokens_map )
__SCREAMING_SNAKE_CASE = "This is a là test"
__SCREAMING_SNAKE_CASE = "▁This ▁is ▁a ▁l à ▁t est".split()
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token]
__SCREAMING_SNAKE_CASE = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
| 54 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bloom_fast'''] = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bloom'''] = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
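    # Descriptive note: the names registered above are only imported when first
    # accessed. _LazyModule defers the heavy torch/tokenizers imports so that the
    # initial package import stays cheap.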
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 320 | 0 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class snake_case ( lowercase ):
"""simple docstring"""
@slow
@require_torch
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
lowerCamelCase_ = BertTokenizer.from_pretrained("bert-base-uncased" )
lowerCamelCase_ = bertabert.config.encoder.vocab_size
lowerCamelCase_ = tokenizer.sep_token_id
lowerCamelCase_ = tokenizer.cls_token_id
lowerCamelCase_ = 128
lowerCamelCase_ = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
lowerCamelCase_ = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
lowerCamelCase_ = train_dataset.select(range(32 ) )
lowerCamelCase_ = val_dataset.select(range(16 ) )
lowerCamelCase_ = 4
def _map_to_encoder_decoder_inputs(UpperCamelCase ):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowerCamelCase_ = tokenizer(batch["article"] , padding="max_length" , truncation=UpperCamelCase , max_length=512 )
lowerCamelCase_ = tokenizer(batch["highlights"] , padding="max_length" , truncation=UpperCamelCase , max_length=128 )
lowerCamelCase_ = inputs.input_ids
lowerCamelCase_ = inputs.attention_mask
lowerCamelCase_ = outputs.input_ids
lowerCamelCase_ = outputs.input_ids.copy()
lowerCamelCase_ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
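            # -100 is the ignore_index of the cross-entropy loss, so padded label
            # positions do not contribute to the training loss.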
lowerCamelCase_ = outputs.attention_mask
assert all(len(UpperCamelCase ) == 512 for x in inputs.input_ids )
assert all(len(UpperCamelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCamelCase ):
lowerCamelCase_ = pred.label_ids
lowerCamelCase_ = pred.predictions
# all unnecessary tokens are removed
lowerCamelCase_ = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )
lowerCamelCase_ = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )
lowerCamelCase_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase ) )] ) / len(UpperCamelCase )
return {"accuracy": accuracy}
# map train dataset
lowerCamelCase_ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase , batch_size=UpperCamelCase , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
lowerCamelCase_ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase , batch_size=UpperCamelCase , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = SeqaSeqTrainingArguments(
output_dir=UpperCamelCase , per_device_train_batch_size=UpperCamelCase , per_device_eval_batch_size=UpperCamelCase , predict_with_generate=UpperCamelCase , evaluation_strategy="steps" , do_train=UpperCamelCase , do_eval=UpperCamelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
lowerCamelCase_ = SeqaSeqTrainer(
model=UpperCamelCase , args=UpperCamelCase , compute_metrics=_compute_metrics , train_dataset=UpperCamelCase , eval_dataset=UpperCamelCase , tokenizer=UpperCamelCase , )
# start training
trainer.train()
| 55 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
__snake_case = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
__snake_case = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def A_ ( ):
"""simple docstring"""
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2''', '''rougeL'''] )
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2'''] )
assert (
pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean()
)
def A_ ( ):
"""simple docstring"""
_a = '''rougeLsum'''
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k]
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k]
assert score > score_no_sep
def A_ ( ):
"""simple docstring"""
_a = ['''rouge1''', '''rouge2''', '''rougeL''']
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase )
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase )
assert score_sep == score_no_sep
def A_ ( ):
"""simple docstring"""
_a = [
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
]
_a = [
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
assert calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase ) == calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase )
def A_ ( ):
"""simple docstring"""
_a = [
'''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
]
_a = [
''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''], newline_sep=_lowerCAmelCase )['''rougeLsum''']
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''] )['''rougeLsum''']
assert new_score > prev_score
def A_ ( ):
"""simple docstring"""
_a = Path('''examples/seq2seq/test_data/wmt_en_ro''' )
_a = calculate_rouge_path(data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ) )
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
_a = calculate_rouge_path(
data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ), bootstrap_aggregation=_lowerCAmelCase )
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
| 320 | 0 |
'''simple docstring'''
def solution() -> int:
    """Count the Sundays that fell on the first of the month from 1 Jan 1901 to 31 Dec 2000."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
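# Starting from Sunday 6 Jan 1901, the loop jumps seven days at a time and rolls the
# month/year counters, counting every Sunday that lands on the 1st (Project Euler 19).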
if __name__ == "__main__":
print(solution())
| 56 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
__snake_case = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor( ChineseCLIPImageProcessor ):
    '''Deprecated alias of ChineseCLIPImageProcessor, kept for backward compatibility.'''
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> None:
warnings.warn(
'''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ChineseCLIPImageProcessor instead.''' , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
| 320 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A : Any = logging.get_logger(__name__)
A : int = {"vocab_file": "sentencepiece.bpe.model"}
A : str = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
A : int = {
"moussaKam/mbarthez": 1_0_2_4,
"moussaKam/barthez": 1_0_2_4,
"moussaKam/barthez-orangesum-title": 1_0_2_4,
}
A : int = "▁"
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Any =VOCAB_FILES_NAMES
__UpperCAmelCase : int =PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Dict =["""input_ids""", """attention_mask"""]
def __init__( self , __a , __a="<s>" , __a="</s>" , __a="</s>" , __a="<s>" , __a="<unk>" , __a="<pad>" , __a="<mask>" , __a = None , **__a , ):
# Mask token behave like a normal word, i.e. include the space before it
__lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
__lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
__lowerCAmelCase = vocab_file
__lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__a ) )
__lowerCAmelCase = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
__lowerCAmelCase = len(self.sp_model ) - 1
__lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def snake_case ( self , __a , __a = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
__lowerCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
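    # Descriptive note: a sequence pair is thus formatted as `<s> A </s></s> B </s>`,
    # following the BARThez/CamemBERT convention for special tokens.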
def snake_case ( self , __a , __a = None , __a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is None:
return [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1]
def snake_case ( self , __a , __a = None ):
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case ( self ):
return len(self.sp_model )
def snake_case ( self ):
__lowerCAmelCase = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case ( self , __a ):
return self.sp_model.encode(__a , out_type=__a )
def snake_case ( self , __a ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowerCAmelCase = self.sp_model.PieceToId(__a )
return spm_id if spm_id else self.unk_token_id
def snake_case ( self , __a ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__a )
def snake_case ( self , __a ):
__lowerCAmelCase = []
__lowerCAmelCase = ""
__lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__a ) + token
__lowerCAmelCase = True
__lowerCAmelCase = []
else:
current_sub_tokens.append(__a )
__lowerCAmelCase = False
out_string += self.sp_model.decode(__a )
return out_string.strip()
def __getstate__( self ):
__lowerCAmelCase = self.__dict__.copy()
__lowerCAmelCase = None
return state
def __setstate__( self , __a ):
__lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__lowerCAmelCase = {}
__lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case ( self , __a , __a = None ):
if not os.path.isdir(__a ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__lowerCAmelCase = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , "wb" ) as fi:
__lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
| 57 |
"""simple docstring"""
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float):
    """Solve for whichever of stress, tangential force, or area is passed in as zero."""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
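# Worked example: with stress=0, tangential_force=25 and area=100 the function solves
# tau = F / A and returns ("stress", 0.25).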
if __name__ == "__main__":
import doctest
doctest.testmod()
| 320 | 0 |
'''simple docstring'''
from ....utils import logging
lowercase_ = logging.get_logger(__name__)
class a_ ( snake_case_ ):
'''simple docstring'''
    def __init__( self , config , num_labels=None , modal_hidden_size=2048 ) -> None:
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 58 |
"""simple docstring"""
def solution():
    """Return the product of the 1st, 10th, ..., 1,000,000th digits of Champernowne's constant."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = ''''''.join(constant)
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[9_99] )
* int(constant[99_99] )
* int(constant[9_99_99] )
* int(constant[99_99_99] )
)
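# The concatenation 123456789101112... gives the fractional digits of Champernowne's
# constant; the indices above pick out d1, d10, d100, ..., d1000000 (0-based indexing).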
if __name__ == "__main__":
print(solution())
| 320 | 0 |
import pprint
import requests
__lowerCamelCase = """https://zenquotes.io/api"""
def quote_of_the_day():
    return requests.get(API_ENDPOINT_URL + "/today" ).json()
def random_quotes():
    return requests.get(API_ENDPOINT_URL + "/random" ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 59 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[str] = 'bart'
A_ : Optional[Any] = ['past_key_values']
A_ : Union[str, Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , __UpperCAmelCase=50265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ) -> Tuple:
_a = vocab_size
_a = max_position_embeddings
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = encoder_layerdrop
_a = decoder_layerdrop
_a = classifier_dropout
_a = use_cache
_a = encoder_layers
_a = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ):
_a = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'''The config can simply be saved and uploaded again to be fixed.''' )
class __lowerCamelCase ( a__ ):
'''simple docstring'''
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_a = {0: '''batch'''}
_a = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
_a = {0: '''batch''', 1: '''decoder_sequence'''}
_a = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_a , _a = self.num_layers
for i in range(__UpperCAmelCase ):
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
_a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_a = super().outputs
else:
_a = super(__UpperCAmelCase , self ).outputs
if self.use_past:
_a , _a = self.num_layers
for i in range(__UpperCAmelCase ):
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Generate decoder inputs
_a = seq_length if not self.use_past else 1
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_a = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
_a = dict(**__UpperCAmelCase , **__UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_a , _a = common_inputs['''input_ids'''].shape
_a = common_inputs['''decoder_input_ids'''].shape[1]
_a , _a = self.num_attention_heads
_a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a = decoder_seq_length + 3
_a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_a = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 )
_a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_a , _a = self.num_layers
_a = min(__UpperCAmelCase , __UpperCAmelCase )
_a = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers
_a = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(__UpperCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
) )
# TODO: test this.
_a = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(__UpperCAmelCase , __UpperCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) )
return common_inputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_a , _a = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_a = seqlen + 2
_a , _a = self.num_layers
_a , _a = self.num_attention_heads
_a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a = common_inputs['''attention_mask'''].dtype
_a = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
_a = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase )
]
return common_inputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_a = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_a = tokenizer.num_special_tokens_to_add(__UpperCAmelCase )
_a = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_a = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
_a = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) )
return common_inputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
elif self.task == "causal-lm":
_a = self._generate_dummy_inputs_for_causal_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
else:
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
return common_inputs
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
if self.task in ["default", "seq2seq-lm"]:
_a = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
_a = super(__UpperCAmelCase , self )._flatten_past_key_values_(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
| 320 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : int ):
lowerCAmelCase : Optional[int] = jnp.ones((batch_size, length) ) / length
return scores
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : List[Any] = None
lowerCAmelCase : Optional[Any] = 2_0
lowerCAmelCase : Any = self._get_uniform_logits(batch_size=2 , length=UpperCamelCase_ )
# tweak scores to not be uniform anymore
lowerCAmelCase : Optional[int] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCAmelCase : Optional[int] = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCAmelCase : Dict = jax.nn.softmax(UpperCamelCase_ , axis=-1 )
lowerCAmelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCAmelCase : List[str] = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCAmelCase : Optional[Any] = jax.nn.softmax(temp_dist_warper_sharper(UpperCamelCase_ , scores.copy() , cur_len=UpperCamelCase_ ) , axis=-1 )
lowerCAmelCase : Optional[Any] = jax.nn.softmax(temp_dist_warper_smoother(UpperCamelCase_ , scores.copy() , cur_len=UpperCamelCase_ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : List[str] = None
lowerCAmelCase : Optional[int] = 1_0
lowerCAmelCase : str = 2
# create ramp distribution
lowerCAmelCase : Optional[Any] = np.broadcast_to(np.arange(UpperCamelCase_ )[None, :] , (batch_size, vocab_size) ).copy()
lowerCAmelCase : Optional[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCAmelCase : Optional[int] = FlaxTopKLogitsWarper(3 )
lowerCAmelCase : Dict = top_k_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCAmelCase : List[Any] = 5
lowerCAmelCase : List[Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCAmelCase : Dict = np.broadcast_to(np.arange(UpperCamelCase_ )[None, :] , (batch_size, length) ).copy()
lowerCAmelCase : Optional[int] = top_k_warp_safety_check(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : int = None
lowerCAmelCase : Dict = 1_0
lowerCAmelCase : List[str] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCAmelCase : List[Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCAmelCase : List[str] = FlaxTopPLogitsWarper(0.8 )
lowerCAmelCase : Optional[Any] = np.exp(top_p_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCAmelCase : Dict = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
lowerCAmelCase : Optional[Any] = np.broadcast_to(np.arange(UpperCamelCase_ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCAmelCase : Optional[int] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCAmelCase : Dict = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCAmelCase : str = top_p_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = 2_0
lowerCAmelCase : Optional[int] = 4
lowerCAmelCase : Dict = 0
lowerCAmelCase : Any = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=UpperCamelCase_ )
# check that min length is applied at length 5
lowerCAmelCase : List[Any] = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
lowerCAmelCase : int = 5
lowerCAmelCase : Any = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Dict = min_dist_processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCAmelCase : Any = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = 1_5
lowerCAmelCase : str = min_dist_processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
self.assertFalse(jnp.isinf(UpperCamelCase_ ).any() )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : str = 2_0
lowerCAmelCase : Union[str, Any] = 4
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase_ )
# check that all scores are -inf except the bos_token_id score
lowerCAmelCase : Optional[int] = ids_tensor((batch_size, 1) , vocab_size=2_0 )
lowerCAmelCase : Any = 1
lowerCAmelCase : List[str] = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Dict = logits_processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCAmelCase : str = 3
lowerCAmelCase : Optional[Any] = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = logits_processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
self.assertFalse(jnp.isinf(UpperCamelCase_ ).any() )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = 2_0
lowerCAmelCase : Dict = 4
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Any = 5
lowerCAmelCase : Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCAmelCase : str = ids_tensor((batch_size, 4) , vocab_size=2_0 )
lowerCAmelCase : int = 4
lowerCAmelCase : str = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : List[Any] = logits_processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCAmelCase : Tuple = 3
lowerCAmelCase : Union[str, Any] = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Dict = logits_processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
self.assertFalse(jnp.isinf(UpperCamelCase_ ).any() )
def lowerCamelCase__ ( self : str ):
lowerCAmelCase : Union[str, Any] = 4
lowerCAmelCase : Tuple = 1_0
lowerCAmelCase : Union[str, Any] = 1_5
lowerCAmelCase : Union[str, Any] = 2
lowerCAmelCase : int = 1
lowerCAmelCase : Tuple = 1_5
# dummy input_ids and scores
lowerCAmelCase : Union[str, Any] = ids_tensor((batch_size, sequence_length) , UpperCamelCase_ )
lowerCAmelCase : Optional[int] = input_ids.copy()
lowerCAmelCase : Tuple = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : List[str] = scores.copy()
# instantiate all dist processors
lowerCAmelCase : Tuple = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCAmelCase : Optional[Any] = FlaxTopKLogitsWarper(3 )
lowerCAmelCase : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCAmelCase : List[Any] = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=UpperCamelCase_ )
lowerCAmelCase : List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
lowerCAmelCase : List[str] = 1_0
# no processor list
lowerCAmelCase : Dict = temp_dist_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
lowerCAmelCase : Dict = top_k_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = top_p_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
lowerCAmelCase : Any = min_dist_proc(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
lowerCAmelCase : int = bos_dist_proc(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
lowerCAmelCase : List[Any] = eos_dist_proc(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
# with processor list
lowerCAmelCase : Tuple = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCAmelCase : Optional[int] = processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Dict = 4
lowerCAmelCase : str = 1_0
lowerCAmelCase : str = 1_5
lowerCAmelCase : Union[str, Any] = 2
lowerCAmelCase : List[Any] = 1
lowerCAmelCase : List[Any] = 1_5
# dummy input_ids and scores
lowerCAmelCase : int = ids_tensor((batch_size, sequence_length) , UpperCamelCase_ )
lowerCAmelCase : Dict = input_ids.copy()
lowerCAmelCase : str = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Any = scores.copy()
# instantiate all dist processors
lowerCAmelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCAmelCase : str = FlaxTopKLogitsWarper(3 )
lowerCAmelCase : Optional[int] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCAmelCase : str = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=UpperCamelCase_ )
lowerCAmelCase : Tuple = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase_ )
lowerCAmelCase : List[str] = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = 1_0
# no processor list
def run_no_processor_list(UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] ):
lowerCAmelCase : Optional[Any] = temp_dist_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
lowerCAmelCase : List[Any] = top_k_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = top_p_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
lowerCAmelCase : Dict = min_dist_proc(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
lowerCAmelCase : int = bos_dist_proc(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
lowerCAmelCase : str = eos_dist_proc(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
return scores
# with processor list
def run_processor_list(UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict ):
lowerCAmelCase : List[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCAmelCase : Optional[int] = processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_ )
return scores
lowerCAmelCase : Any = jax.jit(UpperCamelCase_ )
lowerCAmelCase : int = jax.jit(UpperCamelCase_ )
lowerCAmelCase : str = jitted_run_no_processor_list(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : int = jitted_run_processor_list(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
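# Illustrative reference (not part of the original tests): a plain-numpy sketch of the
# nucleus (top-p) filtering behaviour exercised above. The helper name
# `_nucleus_filter_reference` is hypothetical and is only meant to make the expected
# values in the top-p test easy to verify by hand.
def _nucleus_filter_reference(probs, top_p=0.8, filter_value=0.0):
    # keep the smallest set of highest-probability tokens whose total mass reaches top_p
    order = np.argsort(probs)[::-1]
    kept = np.full_like(probs, filter_value)
    running = 0.0
    for idx in order:
        kept[idx] = probs[idx]
        running += probs[idx]
        if running >= top_p:
            break
    return kept
# e.g. _nucleus_filter_reference(np.array([0.3, 0.1, 0.1, 0.5])) -> [0.3, 0.0, 0.0, 0.5]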
| 60 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
for char in word:
_a = ord(_lowerCAmelCase )
if not _is_chinese_char(_lowerCAmelCase ):
return 0
return 1
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_a = set()
for token in tokens:
_a = len(_lowerCAmelCase ) > 1 and is_chinese(_lowerCAmelCase )
if chinese_word:
word_set.add(_lowerCAmelCase )
_a = list(_lowerCAmelCase )
return word_list
def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : set() ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
_a = max([len(_lowerCAmelCase ) for w in chinese_word_set] )
_a = bert_tokens
_a , _a = 0, len(_lowerCAmelCase )
while start < end:
_a = True
if is_chinese(bert_word[start] ):
_a = min(end - start, _lowerCAmelCase )
for i in range(_lowerCAmelCase, 1, -1 ):
_a = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
_a = '''##''' + bert_word[j]
_a = start + i
_a = False
break
if single_word:
start += 1
return bert_word
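# Illustrative example (input values are assumed, not part of the original script):
# with bert_tokens = ["中", "国", "人"] and chinese_word_set = {"中国"}, the function
# above returns ["中", "##国", "人"], i.e. "国" is re-marked as a sub-token so that the
# whole word "中国" can later be masked as one unit.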
def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : LTP, _lowerCAmelCase : BertTokenizer ):
"""simple docstring"""
_a = []
for i in range(0, len(_lowerCAmelCase ), 1_00 ):
_a = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=['''cws'''] ).cws
_a = [get_chinese_word(_lowerCAmelCase ) for r in res]
ltp_res.extend(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
_a = []
for i in range(0, len(_lowerCAmelCase ), 1_00 ):
_a = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=_lowerCAmelCase, truncation=_lowerCAmelCase, max_length=5_12 )
bert_res.extend(res['''input_ids'''] )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
_a = []
for input_ids, chinese_word in zip(_lowerCAmelCase, _lowerCAmelCase ):
_a = []
for id in input_ids:
_a = bert_tokenizer._convert_id_to_token(_lowerCAmelCase )
input_tokens.append(_lowerCAmelCase )
_a = add_sub_symbol(_lowerCAmelCase, _lowerCAmelCase )
_a = []
        # We only save the positions of Chinese sub-words that start with ##, since they are part of a whole word.
for i, token in enumerate(_lowerCAmelCase ):
if token[:2] == "##":
_a = token[2:]
                # save the positions of Chinese tokens
if len(_lowerCAmelCase ) == 1 and _is_chinese_char(ord(_lowerCAmelCase ) ):
ref_id.append(_lowerCAmelCase )
ref_ids.append(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
return ref_ids
def A_ ( _lowerCAmelCase : Any ):
"""simple docstring"""
with open(args.file_name, '''r''', encoding='''utf-8''' ) as f:
_a = f.readlines()
_a = [line.strip() for line in data if len(_lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    _a = LTP(args.ltp ) # runs faster on a GPU device
_a = BertTokenizer.from_pretrained(args.bert )
_a = prepare_ref(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
with open(args.save_path, '''w''', encoding='''utf-8''' ) as f:
_a = [json.dumps(_lowerCAmelCase ) + '''\n''' for ref in ref_ids]
f.writelines(_lowerCAmelCase )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
        help='''file to process, in the same format as the LM training data''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
        help='''path to save the result''',
)
__snake_case = parser.parse_args()
main(args)
| 320 | 0 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 61 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[Any] = 'gptj'
A_ : Optional[int] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , __UpperCAmelCase=50400 , __UpperCAmelCase=2048 , __UpperCAmelCase=4096 , __UpperCAmelCase=28 , __UpperCAmelCase=16 , __UpperCAmelCase=64 , __UpperCAmelCase=None , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Union[str, Any]:
_a = vocab_size
_a = n_positions
_a = n_embd
_a = n_layer
_a = n_head
_a = n_inner
_a = rotary_dim
_a = activation_function
_a = resid_pdrop
_a = embd_pdrop
_a = attn_pdrop
_a = layer_norm_epsilon
_a = initializer_range
_a = use_cache
_a = bos_token_id
_a = eos_token_id
super().__init__(
bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase )
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = "default" , __UpperCAmelCase = None , __UpperCAmelCase = False , ) -> Optional[Any]:
super().__init__(__UpperCAmelCase , task=__UpperCAmelCase , patching_specs=__UpperCAmelCase , use_past=__UpperCAmelCase )
if not getattr(self._config , '''pad_token_id''' , __UpperCAmelCase ):
# TODO: how to do that better?
_a = 0
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
_a = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' )
_a = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
_a = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def _UpperCAmelCase ( self ) -> int:
return self._config.n_layer
@property
def _UpperCAmelCase ( self ) -> int:
return self._config.n_head
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
_a = super(__UpperCAmelCase , self ).generate_dummy_inputs(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
        # We need to order the inputs in the way they appear in the forward()
_a = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_a , _a = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_a = seqlen + 2
_a = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_a = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(self.num_layers )
]
_a = common_inputs['''attention_mask''']
if self.use_past:
_a = ordered_inputs['''attention_mask'''].dtype
_a = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def _UpperCAmelCase ( self ) -> int:
return 13
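    # Shape sketch (illustrative, using the defaults from the config above plus an assumed
    # dummy batch=2 and seqlen=8, not tied to a real checkpoint): with n_head=16 and
    # n_embd=4096, each past_key_values entry built by the dummy-input method above is a
    # (key, value) pair of zero tensors shaped (2, 16, 8 + 2, 4096 // 16) == (2, 16, 10, 256),
    # and the attention mask is extended by the same past length of ones along dim=1.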
| 320 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Any = "informer"
UpperCAmelCase__ : Optional[int] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , A_ = None , A_ = None , A_ = "student_t" , A_ = "nll" , A_ = 1 , A_ = None , A_ = "mean" , A_ = 0 , A_ = 0 , A_ = 0 , A_ = 0 , A_ = None , A_ = None , A_ = 64 , A_ = 32 , A_ = 32 , A_ = 2 , A_ = 2 , A_ = 2 , A_ = 2 , A_ = True , A_ = "gelu" , A_ = 0.05 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 100 , A_ = 0.02 , A_=True , A_ = "prob" , A_ = 5 , A_ = True , **A_ , ) -> str:
# time series specific configuration
__UpperCamelCase =prediction_length
__UpperCamelCase =context_length or prediction_length
__UpperCamelCase =distribution_output
__UpperCamelCase =loss
__UpperCamelCase =input_size
__UpperCamelCase =num_time_features
__UpperCamelCase =lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
__UpperCamelCase =scaling
__UpperCamelCase =num_dynamic_real_features
__UpperCamelCase =num_static_real_features
__UpperCamelCase =num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(A_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
__UpperCamelCase =cardinality
else:
__UpperCamelCase =[0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(A_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
__UpperCamelCase =embedding_dimension
else:
__UpperCamelCase =[min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__UpperCamelCase =num_parallel_samples
# Transformer architecture configuration
__UpperCamelCase =input_size * len(self.lags_sequence ) + self._number_of_features
__UpperCamelCase =d_model
__UpperCamelCase =encoder_attention_heads
__UpperCamelCase =decoder_attention_heads
__UpperCamelCase =encoder_ffn_dim
__UpperCamelCase =decoder_ffn_dim
__UpperCamelCase =encoder_layers
__UpperCamelCase =decoder_layers
__UpperCamelCase =dropout
__UpperCamelCase =attention_dropout
__UpperCamelCase =activation_dropout
__UpperCamelCase =encoder_layerdrop
__UpperCamelCase =decoder_layerdrop
__UpperCamelCase =activation_function
__UpperCamelCase =init_std
__UpperCamelCase =use_cache
# Informer
__UpperCamelCase =attention_type
__UpperCamelCase =sampling_factor
__UpperCamelCase =distil
super().__init__(is_encoder_decoder=A_ , **A_ )
@property
def _a ( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
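    # Worked example (hypothetical values, for illustration only): with
    # embedding_dimension=[2], num_dynamic_real_features=0, num_time_features=2,
    # num_static_real_features=0 and input_size=1, the property above evaluates to
    # 2 + 0 + 2 + 0 + 1 * 2 = 6, which then enters feature_size together with
    # len(lags_sequence).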
| 62 |
"""simple docstring"""
import os
import sys
import unittest
__snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__snake_case = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
__snake_case = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> str:
_a = get_test_to_tester_mapping(__UpperCAmelCase )
_a = get_test_to_tester_mapping(__UpperCAmelCase )
_a = {'''BertModelTest''': '''BertModelTester'''}
_a = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = get_model_to_test_mapping(__UpperCAmelCase )
_a = get_model_to_test_mapping(__UpperCAmelCase )
_a = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
_a = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = get_model_to_tester_mapping(__UpperCAmelCase )
_a = get_model_to_tester_mapping(__UpperCAmelCase )
_a = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
_a = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
| 320 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 63 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __lowerCamelCase :
'''simple docstring'''
@staticmethod
def _UpperCAmelCase ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
pass
def A_ ( _lowerCAmelCase : Image ):
"""simple docstring"""
    _a = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def A_ ( _lowerCAmelCase : Image ):
"""simple docstring"""
_a = np.array(_lowerCAmelCase )
_a = npimg.shape
return {"hash": hashimage(_lowerCAmelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
A_ : Any = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
A_ : str = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
_a = MaskGenerationPipeline(model=__UpperCAmelCase , image_processor=__UpperCAmelCase )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int:
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def _UpperCAmelCase ( self ) -> List[str]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> int:
_a = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
_a = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 )
# Shortening by hashing
_a = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8871}
] , )
# fmt: on
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Any:
_a = '''facebook/sam-vit-huge'''
_a = pipeline('''mask-generation''' , model=__UpperCAmelCase )
_a = image_segmenter(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
_a = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053},
] , )
| 320 | 0 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ (snake_case__ : list[float] ):
"""simple docstring"""
_snake_case : int = 0.00
_snake_case : int = 0
for resistor in resistors:
if resistor <= 0:
_snake_case : Dict = F"Resistor at index {index} has a negative or zero value!"
raise ValueError(snake_case__ )
first_sum += 1 / float(snake_case__ )
index += 1
return 1 / first_sum
def UpperCAmelCase__ (snake_case__ : list[float] ):
"""simple docstring"""
_snake_case : Union[str, Any] = 0.00
_snake_case : Any = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
_snake_case : Any = F"Resistor at index {index} has a negative value!"
raise ValueError(snake_case__ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
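# Worked example (illustrative values): for resistors of 2, 4 and 4 ohms, the parallel
# helper above gives 1 / (1/2 + 1/4 + 1/4) = 1.0 ohm, while the series helper simply
# sums them to 2 + 4 + 4 = 10.0 ohms.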
| 64 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=9 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=8 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.002 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]:
_a = parent
_a = batch_size
_a = encoder_seq_length
_a = decoder_seq_length
# For common tests
_a = self.decoder_seq_length
_a = is_training
_a = use_attention_mask
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = d_ff
_a = relative_attention_num_buckets
_a = dropout_rate
_a = initializer_factor
_a = eos_token_id
_a = pad_token_id
_a = decoder_start_token_id
_a = None
_a = decoder_layers
def _UpperCAmelCase ( self ) -> Dict:
return TaConfig.from_pretrained('''google/umt5-base''' )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]:
if attention_mask is None:
_a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCAmelCase )
if decoder_head_mask is None:
_a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase )
if cross_attn_head_mask is None:
_a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _UpperCAmelCase ( self ) -> Tuple:
_a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
_a = input_ids.clamp(self.pad_token_id + 1 )
_a = decoder_input_ids.clamp(self.pad_token_id + 1 )
_a = self.get_config()
_a = config.num_attention_heads
_a = self.prepare_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, input_dict
def _UpperCAmelCase ( self ) -> int:
_a , _a = self.prepare_config_and_inputs()
return config, inputs_dict
def _UpperCAmelCase ( self ) -> Tuple:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _UpperCAmelCase ( self ) -> List[str]:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Dict:
_a = UMTaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(
input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , )
_a = model(input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase )
_a = result.last_hidden_state
_a = result.past_key_values
_a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__UpperCAmelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[Any]:
_a = UMTaModel(config=__UpperCAmelCase ).get_decoder().to(__UpperCAmelCase ).eval()
# first forward pass
_a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
_a = model(__UpperCAmelCase )
_a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 )
_a , _a = outputs.to_tuple()
        # create hypothetical next token and extend it to next_input_ids
_a = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append next_tokens to input_ids
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = model(__UpperCAmelCase )['''last_hidden_state''']
_a = model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )['''last_hidden_state''']
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -1, random_slice_idx].detach()
_a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , ) -> Union[str, Any]:
_a = UMTaModel(config=__UpperCAmelCase ).to(__UpperCAmelCase ).half().eval()
_a = model(**__UpperCAmelCase )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(__UpperCAmelCase ).any().item() )
@require_torch
class __lowerCamelCase ( a__ , a__ , a__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
A_ : Optional[Any] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
A_ : int = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
A_ : str = True
A_ : List[str] = False
A_ : List[Any] = False
A_ : str = True
A_ : List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
A_ : Optional[Any] = [0.8, 0.9]
def _UpperCAmelCase ( self ) -> Tuple:
_a = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def _UpperCAmelCase ( self ) -> int:
_a = self.model_tester.prepare_config_and_inputs()
_a = UMTaModel(config_and_inputs[0] ).to(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=__UpperCAmelCase , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
_a = self.model_tester.prepare_config_and_inputs()
_a = config_and_inputs[0]
_a = UMTaForConditionalGeneration(__UpperCAmelCase ).eval()
model.to(__UpperCAmelCase )
_a = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=__UpperCAmelCase ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ),
}
for attn_name, (name, mask) in zip(__UpperCAmelCase , head_masking.items() ):
_a = {name: mask}
            # Explicitly pass decoder_head_mask as it is required by the T5 model when head_mask is specified
if name == "head_mask":
_a = torch.ones(
config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase )
_a = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=__UpperCAmelCase , return_dict_in_generate=__UpperCAmelCase , **__UpperCAmelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def _UpperCAmelCase ( self ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=__UpperCAmelCase ).to(__UpperCAmelCase )
_a = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=__UpperCAmelCase , legacy=__UpperCAmelCase )
_a = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
_a = tokenizer(__UpperCAmelCase , return_tensors='''pt''' , padding=__UpperCAmelCase ).input_ids
# fmt: off
_a = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__UpperCAmelCase , __UpperCAmelCase )
_a = model.generate(input_ids.to(__UpperCAmelCase ) )
_a = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
_a = tokenizer.batch_decode(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
| 320 | 0 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
UpperCamelCase__ = datasets.utils.logging.get_logger(__name__)
@dataclass
class A ( datasets.BuilderConfig ):
__UpperCAmelCase : int = 1_00_00
__UpperCAmelCase : Optional[List[str]] = None
__UpperCAmelCase : Optional[datasets.Features] = None
class A ( datasets.ArrowBasedBuilder ):
__UpperCAmelCase : List[Any] = ParquetConfig
def lowercase_ (self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def lowercase_ (self : Dict , __UpperCAmelCase : int ) -> Union[str, Any]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
UpperCAmelCase__ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__UpperCAmelCase , (str, list, tuple) ):
UpperCAmelCase__ = data_files
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
UpperCAmelCase__ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase__ = [dl_manager.iter_files(__UpperCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
UpperCAmelCase__ = []
for split_name, files in data_files.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
UpperCAmelCase__ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase__ = [dl_manager.iter_files(__UpperCAmelCase ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(__UpperCAmelCase ):
with open(__UpperCAmelCase , "rb" ) as f:
UpperCAmelCase__ = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCAmelCase ) )
break
splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"files": files} ) )
return splits
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : pa.Table ) -> pa.Table:
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase__ = table_cast(__UpperCAmelCase , self.info.features.arrow_schema )
return pa_table
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase ) ):
with open(__UpperCAmelCase , "rb" ) as f:
UpperCAmelCase__ = pq.ParquetFile(__UpperCAmelCase )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
UpperCAmelCase__ = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"""{file_idx}_{batch_idx}""", self._cast_table(__UpperCAmelCase )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(__UpperCAmelCase )}: {e}""" )
raise
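# Standalone sketch (hypothetical helper, not used by the builder above): the same
# pyarrow calls the generator method relies on, shown in isolation: stream a Parquet
# file in record batches and wrap each batch in an Arrow table.
def _example_stream_parquet(path, batch_size=10_000):
    with open(path, "rb") as f:
        parquet_file = pq.ParquetFile(f)
        for record_batch in parquet_file.iter_batches(batch_size=batch_size):
            yield pa.Table.from_batches([record_batch])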
| 65 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class __lowerCamelCase :
'''simple docstring'''
def __init__( self ) -> Tuple:
_a = {}
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> int:
if self.graph.get(__UpperCAmelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_a = [[w, v]]
if not self.graph.get(__UpperCAmelCase ):
_a = []
def _UpperCAmelCase ( self ) -> int:
return list(self.graph )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]:
if s == d:
return []
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple:
if c == -1:
_a = floor(random() * 10000 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_a = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[str]:
_a = deque()
_a = []
if s == -2:
_a = list(self.graph )[0]
d.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
while d:
_a = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Tuple:
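        # number of edges that point into vertex u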
_a = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict:
return len(self.graph[u] )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple:
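        # DFS-based topological sort: a vertex is emitted once all of its children have been visited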
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
_a = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return sorted_nodes
def _UpperCAmelCase ( self ) -> Optional[int]:
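        # collect the nodes that take part in a cycle (found via DFS back edges)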
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Any:
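        # check whether the graph contains at least one cycle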
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]:
_a = time()
self.dfs(__UpperCAmelCase , __UpperCAmelCase )
_a = time()
return end - begin
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Optional[Any]:
_a = time()
self.bfs(__UpperCAmelCase )
_a = time()
return end - begin
class __lowerCamelCase :
'''simple docstring'''
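    # Undirected, weighted graph: every edge is stored in the adjacency lists of both endpoints.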
def __init__( self ) -> Optional[int]:
_a = {}
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> Dict:
        # check if vertex u already exists
if self.graph.get(__UpperCAmelCase ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_a = [[w, v]]
# add the other way
if self.graph.get(__UpperCAmelCase ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
_a = [[w, u]]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCAmelCase )
# the other way round
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Dict:
if s == d:
return []
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple:
if c == -1:
_a = floor(random() * 10000 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_a = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[Any]:
_a = deque()
_a = []
if s == -2:
_a = list(self.graph )[0]
d.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
while d:
_a = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict:
return len(self.graph[u] )
def _UpperCAmelCase ( self ) -> int:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return list(self.graph )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Tuple:
_a = time()
self.dfs(__UpperCAmelCase , __UpperCAmelCase )
_a = time()
return end - begin
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple:
_a = time()
self.bfs(__UpperCAmelCase )
_a = time()
return end - begin
| 320 | 0 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self: Any ) -> str:
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case , )
assert hasattr(self , """env""" )
def lowerCAmelCase_ ( self: int , snake_case: Dict ) -> List[Any]:
# configuration for running training on smdistributed Model Parallel
snake_case_ :Tuple = {
"""enabled""": True,
"""processes_per_host""": 8,
}
snake_case_ :List[Any] = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
snake_case_ :Tuple = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
snake_case_ :Any = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case , instance_type=self.instance_type , debugger_hook_config=snake_case , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case , py_version="""py36""" , )
def lowerCAmelCase_ ( self: Any , snake_case: Tuple ) -> List[str]:
TrainingJobAnalytics(snake_case ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def lowerCAmelCase_ ( self: Dict , snake_case: Dict ) -> List[Any]:
# create estimator
snake_case_ :List[Any] = self.create_estimator(snake_case )
# run training
estimator.fit()
# result dataframe
snake_case_ :Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
snake_case_ :Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
snake_case_ :Dict = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
    # get train time from the SageMaker job; this includes starting, preprocessing and stopping
snake_case_ :int = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
    # dump the test results into a json file to share in the PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case )
| 66 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : Dict = 'unispeech'
def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase=320 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=100 , __UpperCAmelCase=256 , __UpperCAmelCase=256 , __UpperCAmelCase=0.1 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=80 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=0.5 , **__UpperCAmelCase , ) -> Union[str, Any]:
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
_a = hidden_size
_a = feat_extract_norm
_a = feat_extract_activation
_a = list(__UpperCAmelCase )
_a = list(__UpperCAmelCase )
_a = list(__UpperCAmelCase )
_a = conv_bias
_a = num_conv_pos_embeddings
_a = num_conv_pos_embedding_groups
_a = len(self.conv_dim )
_a = num_hidden_layers
_a = intermediate_size
_a = hidden_act
_a = num_attention_heads
_a = hidden_dropout
_a = attention_dropout
_a = activation_dropout
_a = feat_proj_dropout
_a = final_dropout
_a = layerdrop
_a = layer_norm_eps
_a = initializer_range
_a = num_ctc_classes
_a = vocab_size
_a = do_stable_layer_norm
_a = use_weighted_layer_sum
_a = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a = apply_spec_augment
_a = mask_time_prob
_a = mask_time_length
_a = mask_time_min_masks
_a = mask_feature_prob
_a = mask_feature_length
_a = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_a = num_codevectors_per_group
_a = num_codevector_groups
_a = contrastive_logits_temperature
_a = feat_quantizer_dropout
_a = num_negatives
_a = codevector_dim
_a = proj_codevector_dim
_a = diversity_loss_weight
# ctc loss
_a = ctc_loss_reduction
_a = ctc_zero_infinity
# pretraining loss
_a = replace_prob
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 320 | 0 |
'''simple docstring'''
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
__lowerCamelCase = mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
__lowerCamelCase = max(
mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , j - wt[i - 1] ) + val[i - 1] , )
__lowerCamelCase = val
return f[i][j]
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
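    # bottom-up 0/1 knapsack: dp[i][w_] holds the best value using the first i items within capacity w_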
__lowerCamelCase = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 , n + 1 ):
for w_ in range(1 , w + 1 ):
if wt[i - 1] <= w_:
__lowerCamelCase = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
else:
__lowerCamelCase = dp[i - 1][w_]
    return dp[n][w], dp  # use the full capacity w rather than the leaked loop variable
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
if not (isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(UpperCamelCase__ , (list, tuple) )):
raise ValueError(
'''Both the weights and values vectors must be either lists or tuples''' )
__lowerCamelCase = len(UpperCamelCase__ )
if num_items != len(UpperCamelCase__ ):
__lowerCamelCase = (
'''The number of weights must be the same as the number of values.\n'''
f"""But got {num_items} weights and {len(UpperCamelCase__ )} values"""
)
raise ValueError(UpperCamelCase__ )
for i in range(UpperCamelCase__ ):
if not isinstance(wt[i] , UpperCamelCase__ ):
__lowerCamelCase = (
'''All weights must be integers but got weight of '''
f"""type {type(wt[i] )} at index {i}"""
)
raise TypeError(UpperCamelCase__ )
__lowerCamelCase , __lowerCamelCase = knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = set()
_construct_solution(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return optimal_val, example_optional_set
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
    # For the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i - 1, j),
    # where i - 1 means considering only the previous items at the given maximum weight.
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(UpperCamelCase__ , UpperCamelCase__ , i - 1 , UpperCamelCase__ , UpperCamelCase__ )
else:
optimal_set.add(UpperCamelCase__ )
_construct_solution(UpperCamelCase__ , UpperCamelCase__ , i - 1 , j - wt[i - 1] , UpperCamelCase__ )
if __name__ == "__main__":
__UpperCAmelCase =[3, 2, 4, 4]
__UpperCAmelCase =[4, 3, 2, 3]
__UpperCAmelCase =4
__UpperCAmelCase =6
__UpperCAmelCase =[[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
__UpperCAmelCase , __UpperCAmelCase =knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
__UpperCAmelCase , __UpperCAmelCase =knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("optimal_value = ", optimal_solution)
print("An optimal subset corresponding to the optimal value", optimal_subset)
| 67 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__snake_case = None
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
__snake_case = {
'''google/rembert''': 256,
}
__snake_case = '''▁'''
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : Optional[Any] = VOCAB_FILES_NAMES
A_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : List[Any] = RemBertTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , **__UpperCAmelCase , ) -> List[Any]:
        # Mask token behaves like a normal word, i.e. includes the space before it
_a = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , )
_a = do_lower_case
_a = remove_space
_a = keep_accents
_a = vocab_file
_a = False if not self.vocab_file else True
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(__UpperCAmelCase ) )
return
_a = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
return (out_vocab_file,)
| 320 | 0 |
lowerCAmelCase__ = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCAmelCase__ = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCAmelCase__ = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 68 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 320 | 0 |
"""simple docstring"""
import sys
from collections import defaultdict
class UpperCamelCase :
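    # Min-heap that also tracks each vertex's position, used by Prim's algorithm below.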
def __init__( self) -> Optional[int]:
snake_case_ = []
def a_ ( self, lowerCAmelCase__) -> Any:
return self.node_position[vertex]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Dict:
snake_case_ = pos
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> str:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
snake_case_ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
snake_case_ = 2 * start + 1
else:
snake_case_ = 2 * start + 2
if heap[smallest_child] < heap[start]:
snake_case_ , snake_case_ = heap[smallest_child], positions[smallest_child]
snake_case_ , snake_case_ = (
heap[start],
positions[start],
)
snake_case_ , snake_case_ = temp, tempa
snake_case_ = self.get_position(positions[smallest_child])
self.set_position(
positions[smallest_child], self.get_position(positions[start]))
self.set_position(positions[start], lowerCAmelCase__)
self.top_to_bottom(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> List[str]:
snake_case_ = position[index]
while index != 0:
snake_case_ = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
if val < heap[parent]:
snake_case_ = heap[parent]
snake_case_ = position[parent]
self.set_position(position[parent], lowerCAmelCase__)
else:
snake_case_ = val
snake_case_ = temp
self.set_position(lowerCAmelCase__, lowerCAmelCase__)
break
snake_case_ = parent
else:
snake_case_ = val
snake_case_ = temp
self.set_position(lowerCAmelCase__, 0)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Dict:
snake_case_ = len(lowerCAmelCase__) // 2 - 1
for i in range(lowerCAmelCase__, -1, -1):
self.top_to_bottom(lowerCAmelCase__, lowerCAmelCase__, len(lowerCAmelCase__), lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Union[str, Any]:
snake_case_ = positions[0]
snake_case_ = sys.maxsize
self.top_to_bottom(lowerCAmelCase__, 0, len(lowerCAmelCase__), lowerCAmelCase__)
return temp
def UpperCAmelCase ( UpperCAmelCase ) -> Tuple:
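    # Prim's algorithm: repeatedly pull the closest vertex off the heap and grow the spanning tree from vertex 0.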
snake_case_ = Heap()
snake_case_ = [0] * len(UpperCAmelCase )
snake_case_ = [-1] * len(UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
snake_case_ = [] # Heap of Distance of vertices from their neighboring vertex
snake_case_ = []
for vertex in range(len(UpperCAmelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCAmelCase )
heap.node_position.append(UpperCAmelCase )
snake_case_ = []
snake_case_ = 1
snake_case_ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
snake_case_ = 0
snake_case_ = distance
heap.heapify(UpperCAmelCase , UpperCAmelCase )
for _ in range(1 , len(UpperCAmelCase ) ):
snake_case_ = heap.delete_minimum(UpperCAmelCase , UpperCAmelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
snake_case_ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCAmelCase )]
):
snake_case_ = distance
heap.bottom_to_top(
UpperCAmelCase , heap.get_position(UpperCAmelCase ) , UpperCAmelCase , UpperCAmelCase )
snake_case_ = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__UpperCamelCase = int(input('''Enter number of edges: ''').strip())
__UpperCamelCase = defaultdict(list)
for _ in range(edges_number):
__UpperCamelCase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 69 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowerCamelCase ( a__ ):
'''simple docstring'''
@require_torch
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_a = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(__UpperCAmelCase )
BertModel.from_pretrained(__UpperCAmelCase )
BertTokenizer.from_pretrained(__UpperCAmelCase )
pipeline(task='''fill-mask''' , model=__UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_a = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_a = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(__UpperCAmelCase )
BertModel.from_pretrained(__UpperCAmelCase )
BertTokenizer.from_pretrained(__UpperCAmelCase )
pipeline(task='''fill-mask''' , model=__UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
_a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
_a = '''
from transformers import pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
_a = self.get_env()
_a = '''1'''
_a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
_a = '''
from transformers import AutoModel
'''
_a = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
| 320 | 0 |
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class UpperCAmelCase :
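    # Tracks the peak resident CPU memory of the current process on a background thread.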
def __init__( self : List[str] ) -> Any:
_lowerCAmelCase = psutil.Process()
_lowerCAmelCase = False
def lowercase__ ( self : str ) -> Optional[Any]:
_lowerCAmelCase = -1
while True:
_lowerCAmelCase = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def lowercase__ ( self : List[str] ) -> Tuple:
_lowerCAmelCase = True
_lowerCAmelCase = threading.Thread(target=self.peak_monitor )
_lowerCAmelCase = True
self.thread.start()
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
_lowerCAmelCase = False
self.thread.join()
return self.cpu_memory_peak
A__ : int =PeakCPUMemory()
def UpperCamelCase__ ( ):
"""simple docstring"""
_lowerCAmelCase = {"""time""": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
_lowerCAmelCase = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
_lowerCAmelCase = torch.cuda.memory_allocated(lowerCAmelCase )
torch.cuda.reset_peak_memory_stats()
return measures
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = {"""time""": time.time() - start_measures["""time"""]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
_lowerCAmelCase = (psutil.Process().memory_info().rss - start_measures["""cpu"""]) / 2**20
_lowerCAmelCase = (cpu_peak_tracker.stop() - start_measures["""cpu"""]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
_lowerCAmelCase = (torch.cuda.memory_allocated(lowerCAmelCase ) - start_measures[str(lowerCAmelCase )]) / 2**20
_lowerCAmelCase = (torch.cuda.max_memory_allocated(lowerCAmelCase ) - start_measures[str(lowerCAmelCase )]) / 2**20
return measures
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
print(f"{description}:" )
print(f"- Time: {measures['time']:.2f}s" )
for i in range(torch.cuda.device_count() ):
print(f"- GPU {i} allocated: {measures[str(lowerCAmelCase )]:.2f}MiB" )
_lowerCAmelCase = measures[f"{i}-peak"]
print(f"- GPU {i} peak: {peak:.2f}MiB" )
print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB" )
print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB" )
| 70 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : str = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Any = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Dict = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Tuple = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Any = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(cls , ['''flax'''] )
| 320 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ :Any = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ :Tuple = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ :List[str] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ :Dict = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ :List[str] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
A_ :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 71 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__snake_case = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__snake_case = '''\
WIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.
It can be used to evaluate the quality of machine-generated texts.
'''
__snake_case = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
def remove_articles(_lowerCAmelCase : Optional[int] ):
_a = re.compile(R'''\b(a|an|the)\b''', re.UNICODE )
return re.sub(_lowerCAmelCase, ''' ''', _lowerCAmelCase )
def white_space_fix(_lowerCAmelCase : Tuple ):
return " ".join(text.split() )
def remove_punc(_lowerCAmelCase : Tuple ):
_a = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowerCAmelCase : List[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) )
def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) )
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Any ):
"""simple docstring"""
_a = [any(compute_exact(_lowerCAmelCase, _lowerCAmelCase ) for ref in refs ) for pred, refs in zip(_lowerCAmelCase, _lowerCAmelCase )]
return (sum(_lowerCAmelCase ) / len(_lowerCAmelCase )) * 1_00
def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : List[Any], _lowerCAmelCase : str, _lowerCAmelCase : str ):
"""simple docstring"""
_a = [rgram for rgrams in rgramslist for rgram in rgrams]
_a = Counter(_lowerCAmelCase )
_a = Counter(_lowerCAmelCase )
_a = Counter()
for sgram, scount in sgramcounter.items():
_a = scount * numref
_a = Counter(_lowerCAmelCase )
_a = Counter()
for cgram, ccount in cgramcounter.items():
_a = ccount * numref
# KEEP
_a = sgramcounter_rep & cgramcounter_rep
_a = keepgramcounter_rep & rgramcounter
_a = sgramcounter_rep & rgramcounter
_a = 0
_a = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(_lowerCAmelCase ) > 0:
_a = keeptmpscorea / len(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_a = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_a = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_a = sgramcounter_rep - cgramcounter_rep
_a = delgramcounter_rep - rgramcounter
_a = sgramcounter_rep - rgramcounter
_a = 0
_a = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
if len(_lowerCAmelCase ) > 0:
_a = deltmpscorea / len(_lowerCAmelCase )
# ADDITION
_a = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
_a = set(_lowerCAmelCase ) & set(_lowerCAmelCase )
_a = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
_a = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_a = 1
_a = 1
if len(_lowerCAmelCase ) > 0:
_a = addtmpscore / len(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
_a = addtmpscore / len(_lowerCAmelCase )
_a = 0
if addscore_precision > 0 or addscore_recall > 0:
_a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Dict, _lowerCAmelCase : Any ):
"""simple docstring"""
_a = len(_lowerCAmelCase )
_a = ssent.split(''' ''' )
_a = csent.split(''' ''' )
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
_a = []
for rsent in rsents:
_a = rsent.split(''' ''' )
_a = []
_a = []
_a = []
ragramslist.append(_lowerCAmelCase )
for i in range(0, len(_lowerCAmelCase ) - 1 ):
if i < len(_lowerCAmelCase ) - 1:
_a = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 2:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 3:
_a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(_lowerCAmelCase )
ragramslist.append(_lowerCAmelCase )
ragramslist.append(_lowerCAmelCase )
ragramslist.append(_lowerCAmelCase )
for i in range(0, len(_lowerCAmelCase ) - 1 ):
if i < len(_lowerCAmelCase ) - 1:
_a = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 2:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 3:
_a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(_lowerCAmelCase )
for i in range(0, len(_lowerCAmelCase ) - 1 ):
if i < len(_lowerCAmelCase ) - 1:
_a = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 2:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(_lowerCAmelCase )
if i < len(_lowerCAmelCase ) - 3:
_a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(_lowerCAmelCase )
((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
_a = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_a = sum([delascore, delascore, delascore, delascore] ) / 4
_a = sum([addascore, addascore, addascore, addascore] ) / 4
_a = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : bool = True, _lowerCAmelCase : str = "13a", _lowerCAmelCase : bool = True ):
"""simple docstring"""
if lowercase:
_a = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_a = sacrebleu.metrics.bleu._get_tokenizer(_lowerCAmelCase )()(_lowerCAmelCase )
else:
_a = sacrebleu.TOKENIZERS[tokenizer]()(_lowerCAmelCase )
elif tokenizer == "moses":
_a = sacremoses.MosesTokenizer().tokenize(_lowerCAmelCase, return_str=_lowerCAmelCase, escape=_lowerCAmelCase )
elif tokenizer == "penn":
_a = sacremoses.MosesTokenizer().penn_tokenize(_lowerCAmelCase, return_str=_lowerCAmelCase )
else:
_a = sentence
if not return_str:
_a = normalized_sent.split()
return normalized_sent
def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
if not (len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == len(_lowerCAmelCase )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
_a = 0
for src, pred, refs in zip(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ):
sari_score += SARIsent(normalize(_lowerCAmelCase ), normalize(_lowerCAmelCase ), [normalize(_lowerCAmelCase ) for sent in refs] )
_a = sari_score / len(_lowerCAmelCase )
return 1_00 * sari_score
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Tuple, _lowerCAmelCase : Any="exp", _lowerCAmelCase : Tuple=None, _lowerCAmelCase : Union[str, Any]=False, _lowerCAmelCase : Optional[Any]=False, _lowerCAmelCase : List[str]=False, ):
"""simple docstring"""
_a = len(references[0] )
if any(len(_lowerCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_a = [[refs[i] for refs in references] for i in range(_lowerCAmelCase )]
_a = sacrebleu.corpus_bleu(
_lowerCAmelCase, _lowerCAmelCase, smooth_method=_lowerCAmelCase, smooth_value=_lowerCAmelCase, force=_lowerCAmelCase, lowercase=_lowerCAmelCase, use_effective_order=_lowerCAmelCase, )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
_a = {}
result.update({'''sari''': compute_sari(sources=__UpperCAmelCase , predictions=__UpperCAmelCase , references=__UpperCAmelCase )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=__UpperCAmelCase , references=__UpperCAmelCase )} )
result.update({'''exact''': compute_em(predictions=__UpperCAmelCase , references=__UpperCAmelCase )} )
return result
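# --- Added illustration (not part of the original dump) ---------------------
# Hedged usage sketch for the SARI/SacreBLEU metric above. It assumes the
# de-obfuscated module exposes compute_sari / compute_sacrebleu with the
# signatures used in the Metric class; the sentences are illustrative only.
# sources = ["About 95 species are currently accepted ."]
# predictions = ["About 95 you now get in ."]
# references = [[
#     "About 95 species are currently known .",
#     "About 95 species are now accepted .",
# ]]
# sari = compute_sari(sources=sources, predictions=predictions, references=references)
# bleu = compute_sacrebleu(predictions=predictions, references=references)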
| 320 | 0 |
"""simple docstring"""
from math import factorial
def snake_case_ ( A_ : int, A_ : int ):
'''simple docstring'''
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(A_ ) // (factorial(A_ ) * factorial(n - k ))
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
F"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
'''If a class of 40 students must be arranged into groups of''',
F"""4 for group projects, there are {combinations(40, 4)} ways""",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
F"""are {combinations(10, 3)} ways that first, second and""",
'''third place can be awarded.''',
)
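# Added sanity note (standard binomial coefficients, stated for illustration):
# combinations(52, 5) == 2_598_960, combinations(40, 4) == 91_390, combinations(10, 3) == 120.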
| 72 |
"""simple docstring"""
def A_ ( _lowerCAmelCase : int = 50 ):
"""simple docstring"""
_a = [1] * (length + 1)
for row_length in range(3, length + 1 ):
for block_length in range(3, row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
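# Added note (hedged): this mirrors Project Euler problem 114 (red blocks of length >= 3,
# separated by at least one grey square); for a row of length seven the problem statement
# quotes 17 arrangements, which is the value solution(7) is expected to return.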
| 320 | 0 |
import os
from collections.abc import Iterator
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(lowerCamelCase__ ):
__lowerCamelCase : Optional[Any] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(lowerCamelCase__ )[1] in (".py", ".ipynb"):
yield os.path.join(lowerCamelCase__ , lowerCamelCase__ ).lstrip('./' )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
return F"{i * ' '}*" if i else "\n##"
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> str:
__lowerCamelCase : Optional[int] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(lowerCamelCase__ ) or old_parts[i] != new_part) and new_part:
print(F"{md_prefix(lowerCamelCase__ )} {new_part.replace('_' , ' ' ).title()}" )
return new_path
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = "." ) -> None:
__lowerCamelCase : Optional[int] = ''
for filepath in sorted(good_file_paths(lowerCamelCase__ ) ):
__lowerCamelCase , __lowerCamelCase : Optional[Any] = os.path.split(lowerCamelCase__ )
if filepath != old_path:
__lowerCamelCase : Tuple = print_path(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Tuple = (filepath.count(os.sep ) + 1) if filepath else 0
__lowerCamelCase : Optional[int] = F"{filepath}/{filename}".replace(' ' , '%20' )
__lowerCamelCase : Tuple = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(F"{md_prefix(lowerCamelCase__ )} [{filename}]({url})" )
if __name__ == "__main__":
print_directory_md(""".""")
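# Added illustration (hedged) of the generated layout, following md_prefix above:
# ## Ciphers
#  * [Caesar Cipher](ciphers/caesar_cipher.py)
#  * [Rot13](ciphers/rot13.py)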
| 73 |
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__snake_case = logging.get_logger('''transformers.models.speecht5''')
__snake_case = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
__snake_case = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
__snake_case = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
__snake_case = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
__snake_case = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
__snake_case = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
__snake_case = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
__snake_case = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
__snake_case = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__snake_case = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__snake_case = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__snake_case = []
__snake_case = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Tuple, _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
for attribute in key.split('''.''' ):
_a = getattr(_lowerCAmelCase, _lowerCAmelCase )
if weight_type is not None:
_a = getattr(_lowerCAmelCase, _lowerCAmelCase ).shape
else:
_a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
elif weight_type == "running_mean":
_a = value
elif weight_type == "running_var":
_a = value
elif weight_type == "num_batches_tracked":
_a = value
else:
_a = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Tuple ):
"""simple docstring"""
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_a , _a = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def A_ ( _lowerCAmelCase : Any, _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : int ):
"""simple docstring"""
_a = []
if task == "s2t":
_a = hf_model.speechta.encoder.prenet.feature_encoder
_a = MAPPING_S2T
_a = IGNORE_KEYS_S2T
elif task == "t2s":
_a = None
_a = MAPPING_T2S
_a = IGNORE_KEYS_T2S
elif task == "s2s":
_a = hf_model.speechta.encoder.prenet.feature_encoder
_a = MAPPING_S2S
_a = IGNORE_KEYS_S2S
else:
raise ValueError(f'Unsupported task: {task}' )
for name, value in fairseq_dict.items():
if should_ignore(_lowerCAmelCase, _lowerCAmelCase ):
logger.info(f'{name} was ignored' )
continue
_a = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, hf_model.config.feat_extract_norm == '''group''', )
_a = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
_a , _a = key.split('''.*.''' )
if prefix in name and suffix in name:
_a = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
_a = True
if "*" in mapped_key:
_a = name.split(_lowerCAmelCase )[0].split('''.''' )[-2]
_a = mapped_key.replace('''*''', _lowerCAmelCase )
if "weight_g" in name:
_a = '''weight_g'''
elif "weight_v" in name:
_a = '''weight_v'''
elif "bias" in name:
_a = '''bias'''
elif "weight" in name:
_a = '''weight'''
elif "running_mean" in name:
_a = '''running_mean'''
elif "running_var" in name:
_a = '''running_var'''
elif "num_batches_tracked" in name:
_a = '''num_batches_tracked'''
else:
_a = None
set_recursively(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Dict, _lowerCAmelCase : List[Any], _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_a = full_name.split('''conv_layers.''' )[-1]
_a = name.split('''.''' )
_a = int(items[0] )
_a = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
_a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
_a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
_a = value
logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
_a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def A_ ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Dict, _lowerCAmelCase : List[Any]=None, _lowerCAmelCase : List[str]=None, _lowerCAmelCase : int=None, ):
"""simple docstring"""
if config_path is not None:
_a = SpeechTaConfig.from_pretrained(_lowerCAmelCase )
else:
_a = SpeechTaConfig()
if task == "s2t":
_a = config.max_text_positions
_a = SpeechTaForSpeechToText(_lowerCAmelCase )
elif task == "t2s":
_a = 18_76
_a = 6_00
_a = config.max_speech_positions
_a = SpeechTaForTextToSpeech(_lowerCAmelCase )
elif task == "s2s":
_a = 18_76
_a = config.max_speech_positions
_a = SpeechTaForSpeechToSpeech(_lowerCAmelCase )
else:
raise ValueError(f'Unknown task name: {task}' )
if vocab_path:
_a = SpeechTaTokenizer(_lowerCAmelCase, model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
_a = AddedToken('''<mask>''', lstrip=_lowerCAmelCase, rstrip=_lowerCAmelCase )
_a = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
_a = SpeechTaFeatureExtractor()
_a = SpeechTaProcessor(tokenizer=_lowerCAmelCase, feature_extractor=_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
_a = torch.load(_lowerCAmelCase )
recursively_load_weights(fairseq_checkpoint['''model'''], _lowerCAmelCase, _lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(_lowerCAmelCase )
model.push_to_hub(_lowerCAmelCase )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
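# Added illustration: a typical invocation of this converter (the script filename is an
# assumption; the flags are the ones declared by the argparse parser above).
#   python convert_speecht5_original_pytorch_checkpoint_to_pytorch.py \
#       --task s2t \
#       --checkpoint_path ./speecht5_asr.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5-asr-converted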
| 320 | 0 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = XLMProphetNetTokenizer
_lowerCamelCase: Optional[Any] = False
_lowerCamelCase: Union[str, Any] = True
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
A = XLMProphetNetTokenizer(A_ ,keep_accents=A_ )
tokenizer.save_pretrained(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
A = '[PAD]'
A = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) ,A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'[PAD]' )
self.assertEqual(vocab_keys[1] ,'[CLS]' )
self.assertEqual(vocab_keys[-1] ,'j' )
self.assertEqual(len(A_ ) ,1012 )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size ,1012 )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
A = XLMProphetNetTokenizer(A_ ,keep_accents=A_ )
A = tokenizer.tokenize('This is a test' )
self.assertListEqual(A_ ,['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A_ ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
A = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
A_ ,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] ,)
A = tokenizer.convert_tokens_to_ids(A_ )
self.assertListEqual(
A_ ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] ,)
A = tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(
A_ ,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] ,)
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' )
@slow
def _SCREAMING_SNAKE_CASE ( self : str ) -> Any:
A = 'Hello World!'
A = [3_5389, 6672, 49, 2]
self.assertListEqual(A_ ,self.big_tokenizer.encode(A_ ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
# fmt: off
A = {'input_ids': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ ,model_name='microsoft/xprophetnet-large-wiki100-cased' ,revision='1acad1643ddd54a44df6a1b797ada8373685d90e' ,)
| 74 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[Any] = 'decision_transformer'
A_ : Union[str, Any] = ['past_key_values']
A_ : str = {
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , __UpperCAmelCase=17 , __UpperCAmelCase=4 , __UpperCAmelCase=128 , __UpperCAmelCase=4096 , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=1024 , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=None , __UpperCAmelCase="relu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Optional[int]:
_a = state_dim
_a = act_dim
_a = hidden_size
_a = max_ep_len
_a = action_tanh
_a = vocab_size
_a = n_positions
_a = n_layer
_a = n_head
_a = n_inner
_a = activation_function
_a = resid_pdrop
_a = embd_pdrop
_a = attn_pdrop
_a = layer_norm_epsilon
_a = initializer_range
_a = scale_attn_weights
_a = use_cache
_a = scale_attn_by_inverse_layer_idx
_a = reorder_and_upcast_attn
_a = bos_token_id
_a = eos_token_id
super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
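# Added usage sketch (hedged): the de-obfuscated class corresponds to the public
# DecisionTransformerConfig; the model class name is assumed from the transformers API.
# from transformers import DecisionTransformerConfig, DecisionTransformerModel
# config = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4096)
# model = DecisionTransformerModel(config)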
| 320 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ : int = logging.get_logger(__name__)
a_ : List[str] = """▁"""
a_ : int = {"""vocab_file""": """sentencepiece.bpe.model"""}
a_ : Union[str, Any] = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
a_ : int = {
"""xlm-roberta-base""": 5_12,
"""xlm-roberta-large""": 5_12,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_12,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_12,
"""xlm-roberta-large-finetuned-conll03-english""": 5_12,
"""xlm-roberta-large-finetuned-conll03-german""": 5_12,
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[int] =VOCAB_FILES_NAMES
lowercase : Tuple =PRETRAINED_VOCAB_FILES_MAP
lowercase : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] =['input_ids', 'attention_mask']
def __init__( self, lowerCAmelCase, lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="</s>", lowerCAmelCase="<s>", lowerCAmelCase="<unk>", lowerCAmelCase="<pad>", lowerCAmelCase="<mask>", lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase, rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) else mask_token
lowerCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, unk_token=lowerCAmelCase, sep_token=lowerCAmelCase, cls_token=lowerCAmelCase, pad_token=lowerCAmelCase, mask_token=lowerCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase, )
lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase ) )
lowerCamelCase_ =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCamelCase_ ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCamelCase_ =1
lowerCamelCase_ =len(self.sp_model ) + self.fairseq_offset
lowerCamelCase_ ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
"""simple docstring"""
lowerCamelCase_ =self.__dict__.copy()
lowerCamelCase_ =None
lowerCamelCase_ =self.sp_model.serialized_model_proto()
return state
def __setstate__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowerCamelCase_ ={}
lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase_ =[self.cls_token_id]
lowerCamelCase_ =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase, token_ids_a=lowerCAmelCase, already_has_special_tokens=lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase )) + [1]
return [1] + ([0] * len(lowerCAmelCase )) + [1, 1] + ([0] * len(lowerCAmelCase )) + [1]
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
"""simple docstring"""
lowerCamelCase_ =[self.sep_token_id]
lowerCamelCase_ =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self ):
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ={self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase, out_type=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase_ =self.sp_model.PieceToId(lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =''''''.join(lowerCAmelCase ).replace(lowerCAmelCase, ''' ''' ).strip()
return out_string
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ =os.path.join(
lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase, '''wb''' ) as fi:
lowerCamelCase_ =self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
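# Added illustration (hedged): the fairseq/SentencePiece alignment table above means every
# SentencePiece id is shifted by fairseq_offset (1) and ids 0-3 are the reserved specials,
# i.e. "<s>" -> 0, "<pad>" -> 1, "</s>" -> 2, "<unk>" -> 3 for the public XLMRobertaTokenizer.
# tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
# assert tok.convert_tokens_to_ids("<s>") == 0 and tok.convert_tokens_to_ids("<pad>") == 1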
| 75 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__snake_case = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[str] = ['pixel_values']
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> None:
super().__init__(**__UpperCAmelCase )
_a = size if size is not None else {'''shortest_edge''': 224}
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
_a = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' )
_a = do_resize
_a = size
_a = resample
_a = do_center_crop
_a = crop_size
_a = do_rescale
_a = rescale_factor
_a = do_normalize
_a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_a = image_std if image_std is not None else OPENAI_CLIP_STD
_a = do_convert_rgb
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_a = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase )
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
_a = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Optional[Any]:
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image:
_a = do_resize if do_resize is not None else self.do_resize
_a = size if size is not None else self.size
_a = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase )
_a = resample if resample is not None else self.resample
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase )
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_a = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_a = [convert_to_rgb(__UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
_a = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
_a = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
if do_rescale:
_a = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_normalize:
_a = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images]
_a = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
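# Added usage sketch (hedged): assuming the de-obfuscated public name CLIPImageProcessor,
# the preprocess pipeline above (RGB convert -> resize shortest edge 224 -> center crop 224
# -> rescale by 1/255 -> normalize with the OpenAI CLIP statistics) is typically driven as:
# import numpy as np
# processor = CLIPImageProcessor()
# image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
# batch = processor(images=image, return_tensors="np")  # batch["pixel_values"].shape == (1, 3, 224, 224)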
| 320 | 0 |
from collections.abc import Sequence
def lowerCamelCase__ ( _a = None):
if nums is None or not nums:
raise ValueError("Input sequence should not be empty")
SCREAMING_SNAKE_CASE : Dict = nums[0]
for i in range(1 , len(_a)):
SCREAMING_SNAKE_CASE : Tuple = nums[i]
SCREAMING_SNAKE_CASE : Dict = max(_a , ans + num , _a)
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
a_ = int(input('Enter number of elements : ').strip())
a_ = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array))
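# Added check (hedged, using the de-obfuscated name printed above): this computes the best
# non-contiguous subsequence sum, so max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 12
# (the sum of the positive entries), and an all-negative input returns its largest element.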
| 76 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 320 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase : int = logging.get_logger(__name__)
_UpperCamelCase : str = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
_UpperCamelCase : Dict = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Dict = torch.load(_lowerCAmelCase , map_location='cpu' )
return sd
def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str=rename_keys_prefix ):
'''simple docstring'''
lowercase__ : Union[str, Any] = OrderedDict()
lowercase__ : List[str] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
lowercase__ : List[str] = key
for name_pair in rename_keys_prefix:
lowercase__ : List[Any] = new_key.replace(name_pair[0] , name_pair[1] )
lowercase__ : List[str] = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
lowercase__ : str = new_d['cls.predictions.bias']
return new_d
@torch.no_grad()
def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : str ):
'''simple docstring'''
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
# Get Config
if "pre" in checkpoint_path:
lowercase__ : List[Any] = 'pretraining'
if "vcr" in checkpoint_path:
lowercase__ : int = {'visual_embedding_dim': 512}
elif "vqa_advanced" in checkpoint_path:
lowercase__ : Tuple = {'visual_embedding_dim': 2048}
elif "vqa" in checkpoint_path:
lowercase__ : Union[str, Any] = {'visual_embedding_dim': 2048}
elif "nlvr" in checkpoint_path:
lowercase__ : List[str] = {'visual_embedding_dim': 1024}
else:
raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" )
else:
if "vcr" in checkpoint_path:
lowercase__ : Optional[Any] = {'visual_embedding_dim': 512}
lowercase__ : int = 'multichoice'
elif "vqa_advanced" in checkpoint_path:
lowercase__ : int = {'visual_embedding_dim': 2048}
lowercase__ : Tuple = 'vqa_advanced'
elif "vqa" in checkpoint_path:
lowercase__ : Union[str, Any] = {'visual_embedding_dim': 2048, 'num_labels': 3129}
lowercase__ : int = 'vqa'
elif "nlvr" in checkpoint_path:
lowercase__ : Tuple = {
'visual_embedding_dim': 1024,
'num_labels': 2,
}
lowercase__ : List[str] = 'nlvr'
lowercase__ : Optional[Any] = VisualBertConfig(**_lowerCAmelCase )
# Load State Dict
lowercase__ : List[Any] = load_state_dict(_lowerCAmelCase )
lowercase__ : str = get_new_dict(_lowerCAmelCase , _lowerCAmelCase )
if model_type == "pretraining":
lowercase__ : Optional[Any] = VisualBertForPreTraining(_lowerCAmelCase )
elif model_type == "vqa":
lowercase__ : Optional[Any] = VisualBertForQuestionAnswering(_lowerCAmelCase )
elif model_type == "nlvr":
lowercase__ : Dict = VisualBertForVisualReasoning(_lowerCAmelCase )
elif model_type == "multichoice":
lowercase__ : List[Any] = VisualBertForMultipleChoice(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
# Save Checkpoints
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
_UpperCamelCase : int = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
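# Added illustration: the converter takes two positional arguments, e.g.
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py vqa_fine_tuned.th ./visualbert-vqa
# (the script filename is an assumption; the checkpoint filename must be one of
#  ACCEPTABLE_CHECKPOINTS, since the config is inferred from substrings such as "vqa", "vcr", "nlvr").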
| 77 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
__snake_case = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
__snake_case = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def A_ ( ):
"""simple docstring"""
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2''', '''rougeL'''] )
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2'''] )
assert (
pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean()
)
def A_ ( ):
"""simple docstring"""
_a = '''rougeLsum'''
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k]
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k]
assert score > score_no_sep
def A_ ( ):
"""simple docstring"""
_a = ['''rouge1''', '''rouge2''', '''rougeL''']
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase )
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase )
assert score_sep == score_no_sep
def A_ ( ):
"""simple docstring"""
_a = [
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
]
_a = [
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
assert calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase ) == calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase )
def A_ ( ):
"""simple docstring"""
_a = [
'''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
]
_a = [
''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''], newline_sep=_lowerCAmelCase )['''rougeLsum''']
_a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''] )['''rougeLsum''']
assert new_score > prev_score
def A_ ( ):
"""simple docstring"""
_a = Path('''examples/seq2seq/test_data/wmt_en_ro''' )
_a = calculate_rouge_path(data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ) )
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
_a = calculate_rouge_path(
data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ), bootstrap_aggregation=_lowerCAmelCase )
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
| 320 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case_ = """▁"""
snake_case_ = {"""vocab_file""": """spiece.model"""}
snake_case_ = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}
}
snake_case_ = {
"""google/pegasus-xsum""": 512,
}
snake_case_ = logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self :Optional[Any] , lowercase_ :List[Any] , lowercase_ :List[Any]="<pad>" , lowercase_ :Optional[int]="</s>" , lowercase_ :Any="<unk>" , lowercase_ :Dict="<mask_2>" , lowercase_ :Dict="<mask_1>" , lowercase_ :Union[str, Any]=None , lowercase_ :Optional[Any]=1_03 , lowercase_ :Optional[Dict[str, Any]] = None , **lowercase_ :Optional[Any] , ) -> None:
UpperCAmelCase = offset
if additional_special_tokens is not None:
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError(
f"""additional_special_tokens should be of type {type(lowercase_ )}, but is"""
f""" {type(lowercase_ )}""" )
UpperCAmelCase = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(lowercase_ ) , self.offset - 1 )
]
if len(set(lowercase_ ) ) != len(lowercase_ ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
UpperCAmelCase = additional_special_tokens_extended
else:
UpperCAmelCase = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase_ , unk_token=lowercase_ , mask_token=lowercase_ , pad_token=lowercase_ , mask_token_sent=lowercase_ , offset=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
UpperCAmelCase = mask_token_sent
UpperCAmelCase = vocab_file
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
# add special tokens to encoder dict
UpperCAmelCase = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
UpperCAmelCase = {v: k for k, v in self.encoder.items()}
@property
def UpperCAmelCase__ ( self :List[str] ) -> int:
return len(self.sp_model ) + self.offset
def UpperCAmelCase__ ( self :int ) -> Dict[str, int]:
UpperCAmelCase = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self :str ) -> Any:
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
return state
def __setstate__( self :Any , lowercase_ :List[Any] ) -> Optional[Any]:
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCAmelCase = {}
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :str ) -> List[str]:
return self.sp_model.encode(lowercase_ , out_type=str )
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :str ) -> int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
UpperCAmelCase = self.sp_model.piece_to_id(lowercase_ )
return sp_id + self.offset
def UpperCAmelCase__ ( self :Dict , lowercase_ :int ) -> str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
UpperCAmelCase = self.sp_model.IdToPiece(index - self.offset )
return token
def UpperCAmelCase__ ( self :Dict , lowercase_ :Optional[Any] ) -> Tuple:
current_sub_tokens = []
out_string = ''
for token in lowercase_:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(current_sub_tokens ) + token
current_sub_tokens = []
else:
current_sub_tokens.append(token )
out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def UpperCAmelCase__ ( self :List[str] , lowercase_ :Union[str, Any]=False ) -> Any:
return 1
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :List[str] ) -> int:
all_special_ids = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in lowercase_]
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :List , lowercase_ :Optional[List] = None , lowercase_ :bool = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(lowercase_ )
elif token_ids_a is None:
return self._special_token_mask(lowercase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def UpperCAmelCase__ ( self :Any , lowercase_ :Optional[Any] , lowercase_ :str=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self :Any , lowercase_ :str , lowercase_ :Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowercase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase = os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , 'wb' ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
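# --- Hedged usage sketch (not part of the original sample) -----------------------------------
# Assuming a local SentencePiece model file such as "spiece.model" (a placeholder path) and that
# the base class wired in above provides the usual tokenizer `__call__`/decode API, the class
# defined here could be exercised roughly like this:
#
#   tok = A_("spiece.model")
#   ids = tok("PEGASUS is pre-trained with gap sentences.")["input_ids"]
#   print(tok.convert_ids_to_tokens(ids))
#
# The call pattern mirrors other SentencePiece-based tokenizers; exact output depends on the
# vocabulary file supplied.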
| 78 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
__snake_case = logging.get_logger(__name__)
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , *args , **kwargs ) -> None:
warnings.warn(
'''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ChineseCLIPImageProcessor instead.''' , FutureWarning , )
super().__init__(*args , **kwargs )
| 320 | 0 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple=13 , __UpperCAmelCase : int=30 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : str=3 , __UpperCAmelCase : Any=True , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Any=32 , __UpperCAmelCase : int=5 , __UpperCAmelCase : int=4 , __UpperCAmelCase : List[Any]=37 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : Any=10 , __UpperCAmelCase : Dict=0.02 , __UpperCAmelCase : Union[str, Any]=3 , __UpperCAmelCase : Union[str, Any]=0.6 , __UpperCAmelCase : Any=None , ):
'''simple docstring'''
_A = parent
_A = batch_size
_A = image_size
_A = patch_size
_A = num_channels
_A = is_training
_A = use_labels
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = type_sequence_label_size
_A = initializer_range
_A = mask_ratio
_A = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_A = (image_size // patch_size) ** 2
_A = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
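# Assuming the defaults above map to image_size=30, patch_size=2 and mask_ratio=0.6, this gives
# num_patches = (30 // 2) ** 2 = 225 and seq_length = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91.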
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase ( self : Any , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple ):
'''simple docstring'''
_A = ViTMAEModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_A = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] ):
'''simple docstring'''
_A = ViTMAEForPreTraining(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_A = model(__UpperCAmelCase )
_A = (self.image_size // self.patch_size) ** 2
_A = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_A = 1
_A = ViTMAEForPreTraining(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A = model(__UpperCAmelCase )
_A = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
snake_case = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
snake_case = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
snake_case = False
snake_case = False
snake_case = False
snake_case = False
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = ViTMAEModelTester(self )
_A = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(__UpperCAmelCase )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase )
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] ):
'''simple docstring'''
np.random.seed(2 )
_A = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_A = torch.from_numpy(__UpperCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_A = pt_noise
super().check_pt_tf_models(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_A = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
out_before = outputs[0].cpu().numpy()
out_before[np.isnan(out_before )] = 0 # zero out NaNs so the comparison below is well-defined
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCAmelCase )
_A = model_class.from_pretrained(__UpperCAmelCase )
model.to(__UpperCAmelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_A = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
# Make sure we don't have nans
out_after = after_outputs[0].cpu().numpy()
out_after[np.isnan(out_after )] = 0
max_diff = np.amax(np.abs(out_after - out_before ) )
self.assertLessEqual(max_diff , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
pass
@slow
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = ViTMAEModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def __lowercase ( ) -> Optional[int]:
'''simple docstring'''
_A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
np.random.seed(2 )
_A = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__UpperCAmelCase )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_A = ViTMAEConfig()
_A = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_A = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_A = model(**__UpperCAmelCase , noise=torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase ) )
# verify the logits
_A = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
_A = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCAmelCase ) , atol=1E-4 ) )
| 79 |
"""simple docstring"""
from __future__ import annotations
def A_ ( stress : float, tangential_force : float, area : float, ):
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
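# Illustrative call (hypothetical values): with the shear stress unknown, a 100 N tangential
# force over an area of 20 m^2 gives A_(stress=0, tangential_force=100, area=20)
# == ("stress", 5.0), i.e. a shear stress of 5 Pa.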
if __name__ == "__main__":
import doctest
doctest.testmod()
| 320 | 0 |
'''simple docstring'''
def _UpperCamelCase ( __A ) -> None:
'''simple docstring'''
triangle = generate_pascal_triangle(__A )
for row_idx in range(__A ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def _UpperCamelCase ( __A ) -> list[list[int]]:
'''simple docstring'''
if not isinstance(__A , int ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
triangle = []
for current_row_idx in range(__A ):
current_row = populate_current_row(triangle , current_row_idx )
triangle.append(current_row )
return triangle
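# Illustrative result: for num_rows = 3 the intended triangle is [[1], [1, 1], [1, 2, 1]].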
def _UpperCamelCase ( __A , __A ) -> list[int]:
'''simple docstring'''
UpperCamelCase__ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
UpperCamelCase__ , UpperCamelCase__ = 1, 1
for current_col_idx in range(1 , __A ):
calculate_current_element(
__A , __A , __A , __A )
return current_row
def _UpperCamelCase ( __A , __A , __A , __A , ) -> None:
'''simple docstring'''
UpperCamelCase__ = triangle[current_row_idx - 1][current_col_idx - 1]
UpperCamelCase__ = triangle[current_row_idx - 1][current_col_idx]
UpperCamelCase__ = above_to_left_elt + above_to_right_elt
def _UpperCamelCase ( __A ) -> list[list[int]]:
'''simple docstring'''
if not isinstance(__A , int ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
UpperCamelCase__ = [[1]]
for row_index in range(1 , __A ):
UpperCamelCase__ = [0] + result[-1] + [0]
UpperCamelCase__ = row_index + 1
# Calculate the number of distinct elements in a row
UpperCamelCase__ = sum(divmod(__A , 2 ) )
UpperCamelCase__ = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
UpperCamelCase__ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
UpperCamelCase__ = row_first_half + row_second_half
result.append(__A )
return result
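# Worked example of the optimized construction: starting from [1], the row is padded to
# [0, 1, 0] and adjacent pairs are summed for the first half ([1]); mirroring it gives [1, 1].
# The next step pads to [0, 1, 1, 0] and yields [1, 2, 1].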
def _UpperCamelCase ( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(func , value ) -> None:
UpperCamelCase__ = F'''{func.__name__}({value})'''
UpperCamelCase__ = timeit(F'''__main__.{call}''' , setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'''{call:38} -- {timing:.4f} seconds''' )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(func , value )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 80 |
"""simple docstring"""
def A_ ( ):
"""simple docstring"""
constant = []
i = 1
while len(constant ) < 1e6:
constant.append(str(i ) )
i += 1
constant = ''.join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[9_99] )
* int(constant[99_99] )
* int(constant[9_99_99] )
* int(constant[99_99_99] )
)
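# The concatenation starts 1 2 3 4 5 6 7 8 9 1 0 1 1 1 2 ..., and the digits at positions
# 1, 10, 100, ..., 1_000_000 are 1, 1, 5, 3, 7, 2, 1, so the function above should return 210
# (the well-known answer to Project Euler problem 40).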
if __name__ == "__main__":
print(A_())
| 320 | 0 |