"""A simple launcher script for TPU training, spawning one process per TPU core."""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Parse the command-line arguments of the launch helper."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
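# Example invocation (added for illustration; the training script name and its
# arguments are hypothetical). torch_xla calls `_mp_fn(index)` once per spawned
# process, so the target script must define a function with that name:
#
#   python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 1e-5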
"""Checks that the dummy backend-specific objects of diffusers are up to date."""
import argparse
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
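# For illustration (not part of the original script): for a torch-only object,
# `create_dummy_object("UNet2DModel", '["torch"]')` renders the DUMMY_CLASS
# template roughly as:
#
#   class UNet2DModel(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])
#       ...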
"""Lazy import structure for the Perceiver model."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Project Euler problem 92: counting square-digit chains that arrive at 89."""
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """Returns the next number in the chain of squared digits."""
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared


# There are 2 chains made:
# one ends with 89 with the chain member 58 being the one which, when declared first,
# gives the least number of iterations for all the members to be checked;
# the other one ends with 1 and has only one element, 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution.
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain of 1 ends with 1
CHAINS[57] = False  # the chain of 58 ends with 89


def chain(number: int) -> bool:
    """Returns True if the chain of `number` ends with 1, False if it ends with 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Counts how many starting numbers below `number` produce a chain that arrives at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
"""Tokenization classes for BART, based on byte-level Byte-Pair-Encoding."""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, avoiding whitespace/control
    characters the BPE code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply the byte-pair-encoding merges to a single pre-tokenized token."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
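# A minimal usage sketch (token ids shown are illustrative; BART shares the
# byte-level BPE vocabulary of GPT-2/RoBERTa):
#
#   from transformers import BartTokenizer
#
#   tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#   tokenizer.tokenize("Hello world")       # ['Hello', 'Ġworld'], 'Ġ' marks a leading space
#   tokenizer("Hello world")["input_ids"]   # e.g. [0, 31414, 232, 2] = <s> Hello Ġworld </s>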
"""The `download` subcommand of the transformers CLI."""
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
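# Example invocation through the transformers CLI entry point (model name and
# cache path are illustrative):
#
#   transformers-cli download --cache-dir /tmp/models bert-base-uncased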
"""Deterministic Miller-Rabin primality test (deterministic up to ~3.32e24)."""


def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Returns True if `n` is prime; deterministic below the largest bound listed."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False

    return True
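# A worked example of the decomposition the loop above performs (added for
# illustration): for n = 13, n - 1 = 12 = 3 * 2**2, so d = 3 and s = 2.
# With witness prime = 2: pow(2, 3, 13) = 8 (neither 1 nor n - 1), but at r = 1,
# pow(2, 6, 13) = 12 = n - 1, so 13 survives the witness and is reported prime.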
def test_miller_rabin() -> None:
    """Testing a nontrivial (ends in 1, 3, 7, 9) composite and a prime in each range."""
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


if __name__ == "__main__":
    test_miller_rabin()
"""A PyTorch Dataset wrapper around the SQuAD processors (cached feature conversion)."""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
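# A minimal usage sketch (paths are hypothetical; `data_dir` must contain the
# SQuAD train/dev json files):
#
#   from transformers import AutoTokenizer
#
#   data_args = SquadDataTrainingArguments(model_type="bert", data_dir="data/squad")
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   train_dataset = SquadDataset(data_args, tokenizer=tokenizer, mode="train")
#   train_dataset[0]["input_ids"].shape  # torch.Size([128]) with the default max_seq_length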
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce the amount of console output from TF

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
"""Convert the original OpenAI Jukebox checkpoints to the transformers Jukebox format."""
import argparse
import json
import os
from pathlib import Path

import requests
import torch

from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}


def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key


def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict


@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """
    Copy/paste/tweak the original checkpoint weights into our Jukebox structure.
    """
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        # The key renames below are reconstructed from the original conversion
        # script (the dataset dump lost the left-hand sides of these assignments).
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
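# Example invocation (mirrors the argparse defaults above):
#
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted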
"""Processor class that bundles the Chinese-CLIP image processor and tokenizer."""
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
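# A minimal usage sketch (checkpoint id and exact output keys are illustrative;
# requires PIL and a checkpoint shipping both a BertTokenizer and a
# ChineseCLIPImageProcessor):
#
#   from transformers import ChineseCLIPProcessor
#   from PIL import Image
#
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一张猫的照片"], images=Image.open("cat.png"), return_tensors="pt")
#   # text-only keys come from the tokenizer, "pixel_values" from the image processor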
"""Project Euler problem 25: index of the first Fibonacci term to contain n digits."""


def solution(n: int = 1000) -> int:
    """Returns the index of the first term in the Fibonacci sequence to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
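# Worked check (for illustration): the sequence runs 1, 1, 2, 3, 5, 8, 13, 21,
# 34, 55, 89, 144, ... so the first term with 3 digits is F(12) = 144 and
# solution(3) returns 12.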
"""ResNet model configuration."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
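# A minimal usage sketch (illustrative values for a smaller, basic-block variant):
#
#   config = ResNetConfig(depths=[2, 2, 2, 2], layer_type="basic", out_features=["stage4"])
#   config.stage_names  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']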
"""Utilities to map model classes, test classes and model tester classes in a test file."""
import importlib
import os
import sys


# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")


def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Get the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)

    return test_module


def get_tester_classes(test_file):
    """Get all model tester classes in a test file."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all [test] classes in a test file with `all_model_classes` defined and non-empty."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` of some test class in a test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all [test] classes in a test file that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes that are associated to a model class in a test file."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from [test] classes to model tester classes in a test file."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to test classes in a test file."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to model tester classes in a test file."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the information succinct and easy to read: replace class objects by their names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
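# A usage sketch (the test file path must exist in a transformers checkout, and
# the example output is illustrative):
#
#   test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
#   mapping = get_model_to_tester_mapping(test_file)
#   print(to_json(mapping))  # e.g. {"BertModel": ["BertModelTester"], ...}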
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[str] =['image_processor', 'tokenizer']
lowercase : Optional[int] ='LayoutLMv3ImageProcessor'
lowercase : List[Any] =('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', lowerCAmelCase, )
lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase, lowerCAmelCase )
def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = True, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=lowerCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =[text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCamelCase_ =features['''words''']
lowerCamelCase_ =self.tokenizer(
text=text if text is not None else features['''words'''], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['''boxes'''], word_labels=lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, stride=lowerCAmelCase, pad_to_multiple_of=lowerCAmelCase, return_token_type_ids=lowerCAmelCase, return_attention_mask=lowerCAmelCase, return_overflowing_tokens=lowerCAmelCase, return_special_tokens_mask=lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, return_length=lowerCAmelCase, verbose=lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase, )
# add pixel values
lowerCamelCase_ =features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
lowerCamelCase_ =self.get_overflowing_images(lowerCAmelCase, encoded_inputs['''overflow_to_sample_mapping'''] )
lowerCamelCase_ =images
return encoded_inputs
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[]
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(lowerCAmelCase )} and {len(lowerCAmelCase )}''' )
return images_with_overflow
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, )
return self.image_processor_class
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, )
return self.image_processor
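# Hedged usage sketch: the class above is name-obfuscated, but in transformers
# it is registered as LayoutLMv3Processor. With apply_ocr=True (the default)
# the image processor supplies the words and boxes, so only the image is
# passed. Requires a network connection to fetch the checkpoint:
#
#   from PIL import Image
#   from transformers import LayoutLMv3Processor
#
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   encoding = processor(Image.open("invoice.png").convert("RGB"), return_tensors="pt")
#   print(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values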
| 6 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : str =['speech']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''speech'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Any =['speech']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''speech'''] )
| 6 | 1 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def a_ ( __snake_case : str , __snake_case : bool = True , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : bool = False , __snake_case : float = 100 , __snake_case : float = 0.0_1 , __snake_case : float = 1 , ) -> Any:
"""simple docstring"""
lowerCamelCase_ =False
lowerCamelCase_ =search_prob
lowerCamelCase_ =start_temperate
lowerCamelCase_ =[]
lowerCamelCase_ =0
lowerCamelCase_ =None
while not search_end:
lowerCamelCase_ =current_state.score()
if best_state is None or current_score > best_state.score():
lowerCamelCase_ =current_state
scores.append(__snake_case )
iterations += 1
lowerCamelCase_ =None
lowerCamelCase_ =current_state.get_neighbors()
while (
next_state is None and neighbors
        ): # keep searching until we find a neighbor we can move to (or run out)
lowerCamelCase_ =random.randint(0 , len(__snake_case ) - 1 ) # picking a random neighbor
lowerCamelCase_ =neighbors.pop(__snake_case )
lowerCamelCase_ =picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
lowerCamelCase_ =change * -1 # in case we are finding minimum
if change > 0: # improves the solution
lowerCamelCase_ =picked_neighbor
else:
lowerCamelCase_ =(math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
lowerCamelCase_ =picked_neighbor
lowerCamelCase_ =current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
lowerCamelCase_ =True
else:
lowerCamelCase_ =next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(__snake_case ) , __snake_case )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def a_ ( __snake_case : List[str] , __snake_case : Optional[int] ) -> str:
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ : Optional[int] = simulated_annealing(
prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ : List[str] = simulated_annealing(
prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def a_ ( __snake_case : Dict , __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return (3 * x**2) - (6 * y)
a_ : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ : Optional[Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
a_ : Dict = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ : Optional[int] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
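    # The acceptance rule above is the Metropolis criterion: a move that
    # worsens the score by |change| is taken with probability e^(change / T).
    # A quick standalone look at how the temperature gates bad moves:
    for temp in (100, 10, 1):
        print(temp, math.e ** (-5 / temp))  # p(accept a move that is 5 worse)
    # ~0.95 at T=100, ~0.61 at T=10, ~0.007 at T=1: bad moves die out as T drops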
| 6 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[str] =['image_processor', 'tokenizer']
lowercase : Optional[int] ='AutoImageProcessor'
lowercase : List[str] ='AutoTokenizer'
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', lowerCAmelCase, )
lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =self.image_processor
lowerCamelCase_ =False
def __call__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase, **lowerCAmelCase )
lowerCamelCase_ =kwargs.pop('''images''', lowerCAmelCase )
lowerCamelCase_ =kwargs.pop('''text''', lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
lowerCamelCase_ =args[0]
lowerCamelCase_ =args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
lowerCamelCase_ =self.image_processor(lowerCAmelCase, *lowerCAmelCase, **lowerCAmelCase )
if text is not None:
lowerCamelCase_ =self.tokenizer(lowerCAmelCase, **lowerCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCamelCase_ =encodings['''input_ids''']
return inputs
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@contextmanager
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your image inputs, or in a separate call).''' )
lowerCamelCase_ =True
lowerCamelCase_ =self.tokenizer
yield
lowerCamelCase_ =self.image_processor
lowerCamelCase_ =False
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False, lowerCAmelCase=None ):
"""simple docstring"""
if added_vocab is None:
lowerCamelCase_ =self.tokenizer.get_added_vocab()
lowerCamelCase_ ={}
while tokens:
lowerCamelCase_ =re.search(R'''<s_(.*?)>''', lowerCAmelCase, re.IGNORECASE )
if start_token is None:
break
lowerCamelCase_ =start_token.group(1 )
lowerCamelCase_ =re.search(Rf'''</s_{key}>''', lowerCAmelCase, re.IGNORECASE )
lowerCamelCase_ =start_token.group()
if end_token is None:
lowerCamelCase_ =tokens.replace(lowerCAmelCase, '''''' )
else:
lowerCamelCase_ =end_token.group()
lowerCamelCase_ =re.escape(lowerCAmelCase )
lowerCamelCase_ =re.escape(lowerCAmelCase )
lowerCamelCase_ =re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''', lowerCAmelCase, re.IGNORECASE )
if content is not None:
lowerCamelCase_ =content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
lowerCamelCase_ =self.tokenajson(lowerCAmelCase, is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase )
if value:
if len(lowerCAmelCase ) == 1:
lowerCamelCase_ =value[0]
lowerCamelCase_ =value
else: # leaf nodes
lowerCamelCase_ =[]
for leaf in content.split(R'''<sep/>''' ):
lowerCamelCase_ =leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowerCamelCase_ =leaf[1:-2] # for categorical special tokens
output[key].append(lowerCAmelCase )
if len(output[key] ) == 1:
lowerCamelCase_ =output[key][0]
lowerCamelCase_ =tokens[tokens.find(lowerCAmelCase ) + len(lowerCAmelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:], is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase )
if len(lowerCAmelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, )
return self.image_processor_class
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, )
return self.image_processor
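# Hedged usage sketch for the `tokenajson` method above (Donut-style tag
# parsing). Assuming `processor` is an instance of the class defined here:
#
#   sample = "<s_menu><s_name>latte</s_name><s_price>4.50</s_price></s_menu>"
#   processor.tokenajson(sample)
#   # -> {"menu": {"name": "latte", "price": "4.50"}}
#
# Nested <s_key>...</s_key> spans become nested dicts, and leaf values
# separated by <sep/> become lists.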
| 6 | 1 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
a_ : Optional[int] = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class __UpperCamelCase ( unittest.TestCase ):
@classmethod
def lowercase__ ( cls ):
"""simple docstring"""
lowerCamelCase_ =TOKEN
HfFolder.save_token(lowerCAmelCase )
@classmethod
def lowercase__ ( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token, repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
lowerCamelCase_ =FlaxBertModel(lowerCAmelCase )
model.push_to_hub('''test-model-flax''', use_auth_token=self._token )
lowerCamelCase_ =FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
lowerCamelCase_ =flatten_dict(unfreeze(model.params ) )
lowerCamelCase_ =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCamelCase_ =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase, 1e-3, msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token, repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCAmelCase, repo_id='''test-model-flax''', push_to_hub=lowerCAmelCase, use_auth_token=self._token )
lowerCamelCase_ =FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
lowerCamelCase_ =flatten_dict(unfreeze(model.params ) )
lowerCamelCase_ =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCamelCase_ =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase, 1e-3, msg=f'''{key} not identical''' )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
lowerCamelCase_ =FlaxBertModel(lowerCAmelCase )
model.push_to_hub('''valid_org/test-model-flax-org''', use_auth_token=self._token )
lowerCamelCase_ =FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowerCamelCase_ =flatten_dict(unfreeze(model.params ) )
lowerCamelCase_ =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCamelCase_ =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase, 1e-3, msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token, repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowerCAmelCase, repo_id='''valid_org/test-model-flax-org''', push_to_hub=lowerCAmelCase, use_auth_token=self._token )
lowerCamelCase_ =FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowerCamelCase_ =flatten_dict(unfreeze(model.params ) )
lowerCamelCase_ =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCamelCase_ =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase, 1e-3, msg=f'''{key} not identical''' )
def a_ ( __snake_case : List[str] , __snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =True
lowerCamelCase_ =flatten_dict(modela.params )
lowerCamelCase_ =flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
lowerCamelCase_ =False
return models_are_equal
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowerCamelCase_ =FlaxBertModel(lowerCAmelCase )
lowerCamelCase_ ='''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCAmelCase, lowerCAmelCase ) )
with self.assertRaises(lowerCAmelCase ):
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase, subfolder=lowerCAmelCase )
self.assertTrue(check_models_equal(lowerCAmelCase, lowerCAmelCase ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowerCamelCase_ =FlaxBertModel(lowerCAmelCase )
lowerCamelCase_ ='''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCAmelCase, lowerCAmelCase ), max_shard_size='''10KB''' )
with self.assertRaises(lowerCAmelCase ):
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase, subfolder=lowerCAmelCase )
self.assertTrue(check_models_equal(lowerCAmelCase, lowerCAmelCase ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''bert'''
lowerCamelCase_ ='''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(lowerCAmelCase ):
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase, subfolder=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''bert'''
lowerCamelCase_ ='''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(lowerCAmelCase ):
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase, subfolder=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
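# The leaf-by-leaf parameter comparison used in the tests above, in isolation
# (sketch; assumes flax is installed, matching the availability guard at the
# top of this file):
#
#   import numpy as np
#   from flax.traverse_util import flatten_dict
#
#   p_a = {"dense": {"kernel": np.ones((2, 2))}}
#   p_b = {"dense": {"kernel": np.ones((2, 2))}}
#   print(all(np.abs(flatten_dict(p_a)[k] - flatten_dict(p_b)[k]).sum() < 1e-4
#             for k in flatten_dict(p_a)))  # True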
| 6 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
lowercase : Union[str, Any] =ShapEImgaImgPipeline
lowercase : Dict =['image']
lowercase : str =['image']
lowercase : int =[
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
lowercase : int =False
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase__ ( self ):
"""simple docstring"""
return 8
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
lowerCamelCase_ =CLIPVisionModel(lowerCAmelCase )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =CLIPImageProcessor(
crop_size=224, do_center_crop=lowerCAmelCase, do_normalize=lowerCAmelCase, do_resize=lowerCAmelCase, image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], resample=3, size=224, )
return image_processor
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowerCamelCase_ =PriorTransformer(**lowerCAmelCase )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowerCamelCase_ =ShapERenderer(**lowerCAmelCase )
return model
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.dummy_prior
lowerCamelCase_ =self.dummy_image_encoder
lowerCamelCase_ =self.dummy_image_processor
lowerCamelCase_ =self.dummy_renderer
lowerCamelCase_ =HeunDiscreteScheduler(
beta_schedule='''exp''', num_train_timesteps=1_024, prediction_type='''sample''', use_karras_sigmas=lowerCAmelCase, clip_sample=lowerCAmelCase, clip_sample_range=1.0, )
lowerCamelCase_ ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ):
"""simple docstring"""
lowerCamelCase_ =floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
if str(lowerCAmelCase ).startswith('''mps''' ):
lowerCamelCase_ =torch.manual_seed(lowerCAmelCase )
else:
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
lowerCamelCase_ ={
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu'''
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =pipe(**self.get_dummy_inputs(lowerCAmelCase ) )
lowerCamelCase_ =output.images[0]
lowerCamelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowerCamelCase_ =np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =torch_device == '''cpu'''
lowerCamelCase_ =True
self._test_inference_batch_single_identical(
batch_size=2, test_max_difference=lowerCAmelCase, relax_max_difference=lowerCAmelCase, )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =1
lowerCamelCase_ =2
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
lowerCamelCase_ =batch_size * [inputs[key]]
lowerCamelCase_ =pipe(**lowerCAmelCase, num_images_per_prompt=lowerCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowerCamelCase_ =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowerCamelCase_ =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
lowerCamelCase_ =pipe(
lowerCAmelCase, generator=lowerCAmelCase, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type='''np''', ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCAmelCase, lowerCAmelCase )
| 6 | 1 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : Union[str, Any] =VQModel
lowercase : Union[str, Any] ='sample'
@property
def lowercase__ ( self, lowerCAmelCase=(32, 32) ):
"""simple docstring"""
lowerCamelCase_ =4
lowerCamelCase_ =3
lowerCamelCase_ =floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase )
return {"sample": image}
@property
def lowercase__ ( self ):
"""simple docstring"""
return (3, 32, 32)
@property
def lowercase__ ( self ):
"""simple docstring"""
return (3, 32, 32)
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ={
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 3,
}
lowerCamelCase_ =self.dummy_input
return init_dict, inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''', output_loading_info=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ), 0 )
model.to(lowerCAmelCase )
lowerCamelCase_ =model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''' )
model.to(lowerCAmelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
lowerCamelCase_ =torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size )
lowerCamelCase_ =image.to(lowerCAmelCase )
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase ).sample
lowerCamelCase_ =output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowerCamelCase_ =torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3] )
# fmt: on
self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) )
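# Minimal standalone construction sketch, mirroring the init dict built in the
# test above (CPU-only, no checkpoint download needed):
#
#   import torch
#   from diffusers import VQModel
#
#   vq = VQModel(block_out_channels=[32, 64], in_channels=3, out_channels=3,
#                down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
#                up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
#                latent_channels=3)
#   print(vq(torch.randn(1, 3, 32, 32)).sample.shape)  # torch.Size([1, 3, 32, 32])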
| 6 |
'''simple docstring'''
from itertools import product
def a_ ( __snake_case : int , __snake_case : int ) -> list[int]:
"""simple docstring"""
lowerCamelCase_ =sides_number
lowerCamelCase_ =max_face_number * dice_number
lowerCamelCase_ =[0] * (max_total + 1)
lowerCamelCase_ =1
lowerCamelCase_ =range(__snake_case , max_face_number + 1 )
for dice_numbers in product(__snake_case , repeat=__snake_case ):
lowerCamelCase_ =sum(__snake_case )
totals_frequencies[total] += 1
return totals_frequencies
def a_ ( ) -> float:
"""simple docstring"""
lowerCamelCase_ =total_frequency_distribution(
sides_number=4 , dice_number=9 )
lowerCamelCase_ =total_frequency_distribution(
sides_number=6 , dice_number=6 )
lowerCamelCase_ =0
lowerCamelCase_ =9
lowerCamelCase_ =4 * 9
lowerCamelCase_ =6
for peter_total in range(__snake_case , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
lowerCamelCase_ =(4**9) * (6**6)
lowerCamelCase_ =peter_wins_count / total_games_number
lowerCamelCase_ =round(__snake_case , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
| 6 | 1 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
a_ : Any = numpy.array([0, 0])
a_ : List[str] = numpy.array([0.5, 0.8_66_02_54])
a_ : Union[str, Any] = numpy.array([1, 0])
a_ : int = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def a_ ( __snake_case : list[numpy.ndarray] , __snake_case : int ) -> list[numpy.ndarray]:
"""simple docstring"""
lowerCamelCase_ =initial_vectors
for _ in range(__snake_case ):
lowerCamelCase_ =iteration_step(__snake_case )
return vectors
def a_ ( __snake_case : list[numpy.ndarray] ) -> list[numpy.ndarray]:
"""simple docstring"""
lowerCamelCase_ =[]
for i, start_vector in enumerate(vectors[:-1] ):
lowerCamelCase_ =vectors[i + 1]
new_vectors.append(__snake_case )
lowerCamelCase_ =end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def a_ ( __snake_case : numpy.ndarray , __snake_case : float ) -> numpy.ndarray:
"""simple docstring"""
lowerCamelCase_ =numpy.radians(__snake_case )
lowerCamelCase_, lowerCamelCase_ =numpy.cos(__snake_case ), numpy.sin(__snake_case )
lowerCamelCase_ =numpy.array(((c, -s), (s, c)) )
return numpy.dot(__snake_case , __snake_case )
def a_ ( __snake_case : list[numpy.ndarray] ) -> None:
"""simple docstring"""
lowerCamelCase_ =plt.gca()
axes.set_aspect('''equal''' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
lowerCamelCase_, lowerCamelCase_ =zip(*__snake_case )
plt.plot(__snake_case , __snake_case )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ : Dict = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
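    # Standalone check of the 2D rotation used above: rotating the unit
    # x-vector by 60 degrees lands on (cos 60, sin 60) = (0.5, 0.8660254...).
    theta = numpy.radians(60)
    c, s = numpy.cos(theta), numpy.sin(theta)
    print(numpy.dot(numpy.array(((c, -s), (s, c))), numpy.array([1, 0])))
    # -> [0.5       0.8660254]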
| 6 |
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
a_ : Tuple = TypeVar("""T""")
a_ : Dict = Union[List[T], Tuple[T, ...]]
a_ : int = Union[T, List[T], Dict[str, T]]
a_ : Optional[Any] = Union[str, bytes, os.PathLike]
| 6 | 1 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def a_ ( __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Union[str, Any]=1024 , __snake_case : Any=1024 , __snake_case : Dict=False , **__snake_case : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ =AutoTokenizer.from_pretrained(__snake_case )
lowerCamelCase_ =SeqaSeqDataset(__snake_case , __snake_case , __snake_case , __snake_case , type_path='''train''' , **__snake_case )
lowerCamelCase_ =tok.pad_token_id
def get_lens(__snake_case : List[str] ):
lowerCamelCase_ =tqdm(
DataLoader(__snake_case , batch_size=512 , num_workers=8 , shuffle=__snake_case , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
lowerCamelCase_ =[]
for batch in dl:
lowerCamelCase_ =batch['''input_ids'''].ne(__snake_case ).sum(1 ).tolist()
lowerCamelCase_ =batch['''labels'''].ne(__snake_case ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(__snake_case , __snake_case ):
max_lens.append(max(__snake_case , __snake_case ) )
else:
max_lens.extend(__snake_case )
return max_lens
lowerCamelCase_ =get_lens(__snake_case )
lowerCamelCase_ =SeqaSeqDataset(__snake_case , __snake_case , __snake_case , __snake_case , type_path='''val''' , **__snake_case )
lowerCamelCase_ =get_lens(__snake_case )
pickle_save(__snake_case , train_ds.len_file )
pickle_save(__snake_case , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
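    # Hedged usage sketch: the script is driven by python-fire, so (assuming
    # the original entry point keeps the name `save_len_file` and data_dir
    # holds train/val source and target files in the SeqaSeqDataset layout)
    # it would be invoked roughly as:
    #
    #   python save_len_file.py t5-small ./wmt_en_ro --max_source_length 1024
    #
    # It pickles per-example token lengths next to the dataset for use by
    # length-grouped batch samplers.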
| 6 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def a_ ( __snake_case : str , __snake_case : bool = True , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : bool = False , __snake_case : float = 100 , __snake_case : float = 0.0_1 , __snake_case : float = 1 , ) -> Any:
"""simple docstring"""
lowerCamelCase_ =False
lowerCamelCase_ =search_prob
lowerCamelCase_ =start_temperate
lowerCamelCase_ =[]
lowerCamelCase_ =0
lowerCamelCase_ =None
while not search_end:
lowerCamelCase_ =current_state.score()
if best_state is None or current_score > best_state.score():
lowerCamelCase_ =current_state
scores.append(__snake_case )
iterations += 1
lowerCamelCase_ =None
lowerCamelCase_ =current_state.get_neighbors()
while (
next_state is None and neighbors
        ): # keep searching until we find a neighbor we can move to (or run out)
lowerCamelCase_ =random.randint(0 , len(__snake_case ) - 1 ) # picking a random neighbor
lowerCamelCase_ =neighbors.pop(__snake_case )
lowerCamelCase_ =picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
lowerCamelCase_ =change * -1 # in case we are finding minimum
if change > 0: # improves the solution
lowerCamelCase_ =picked_neighbor
else:
lowerCamelCase_ =(math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
lowerCamelCase_ =picked_neighbor
lowerCamelCase_ =current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
lowerCamelCase_ =True
else:
lowerCamelCase_ =next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(__snake_case ) , __snake_case )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def a_ ( __snake_case : List[str] , __snake_case : Optional[int] ) -> str:
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ : Optional[int] = simulated_annealing(
prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ : List[str] = simulated_annealing(
prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def a_ ( __snake_case : Dict , __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return (3 * x**2) - (6 * y)
a_ : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ : Optional[Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
a_ : Dict = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ : Optional[int] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
| 6 | 1 |
'''simple docstring'''
import os
import numpy
import onnx
def a_ ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =a.name
lowerCamelCase_ =b.name
lowerCamelCase_ =''''''
lowerCamelCase_ =''''''
lowerCamelCase_ =a == b
lowerCamelCase_ =name_a
lowerCamelCase_ =name_b
return res
def a_ ( __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(__snake_case , __snake_case )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , __snake_case , __snake_case )
_graph_replace_input_with(node_proto.attribute[1].g , __snake_case , __snake_case )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , __snake_case , __snake_case )
def a_ ( __snake_case : int , __snake_case : List[Any] , __snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(__snake_case , __snake_case , __snake_case )
def a_ ( __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Optional[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ =list(model.graph.initializer )
lowerCamelCase_ =list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
lowerCamelCase_ =inits[i].name
lowerCamelCase_ =inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , __snake_case , __snake_case )
def a_ ( __snake_case : str ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =os.path.dirname(__snake_case )
lowerCamelCase_ =os.path.basename(__snake_case )
lowerCamelCase_ =onnx.load(os.path.join(__snake_case , __snake_case ) )
lowerCamelCase_ =list(model.graph.initializer )
lowerCamelCase_ =set()
lowerCamelCase_ ={}
lowerCamelCase_ =[]
lowerCamelCase_ =0
for i in range(len(__snake_case ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(__snake_case ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(__snake_case )
dup_set.add(__snake_case )
lowerCamelCase_ =inits[j].data_type
lowerCamelCase_ =numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('''unexpected data type: ''' , __snake_case )
total_reduced_size += mem_size
lowerCamelCase_ =inits[i].name
lowerCamelCase_ =inits[j].name
if name_i in dup_map:
dup_map[name_i].append(__snake_case )
else:
lowerCamelCase_ =[name_j]
ind_to_replace.append((j, i) )
print('''total reduced size: ''' , total_reduced_size / 1024 / 1024 / 1024 , '''GB''' )
lowerCamelCase_ =sorted(__snake_case )
_remove_dup_initializers_from_model(__snake_case , __snake_case , __snake_case )
lowerCamelCase_ ='''optimized_''' + model_file_name
lowerCamelCase_ =os.path.join(__snake_case , __snake_case )
onnx.save(__snake_case , __snake_case )
return new_model
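# The duplicate test above blanks out tensor names before comparing protos.
# Standalone sketch of that idea on two tiny initializers (onnx.helper API):
import onnx as _onnx

t_a = _onnx.helper.make_tensor("w1", _onnx.TensorProto.FLOAT, [2], [1.0, 2.0])
t_b = _onnx.helper.make_tensor("w2", _onnx.TensorProto.FLOAT, [2], [1.0, 2.0])


def equal_ignoring_name(a, b):
    names = a.name, b.name
    a.name = b.name = ""
    res = a == b  # protobuf messages support field-wise equality
    a.name, b.name = names
    return res


print(equal_ignoring_name(t_a, t_b))  # True: same data, different names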
| 6 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def a_ ( __snake_case : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =[
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__snake_case , __snake_case )
def a_ ( __snake_case : List[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =emb.weight.shape
lowerCamelCase_ =nn.Linear(__snake_case , __snake_case , bias=__snake_case )
lowerCamelCase_ =emb.weight.data
return lin_layer
def a_ ( __snake_case : Union[str, Any] , __snake_case : Tuple=None ) -> Dict:
"""simple docstring"""
lowerCamelCase_ ={}
for old_key in state_dict.keys():
lowerCamelCase_ =old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
lowerCamelCase_ =key.replace('''moe_layer.experts.0''' , F'''ffn.experts.expert_{expert_idx}''' )
else:
lowerCamelCase_ =key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
if "gate" in key:
lowerCamelCase_ =key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
if "fc2" and "experts" not in key:
lowerCamelCase_ =key.replace('''.fc2.''' , '''.ffn.fc2.''' )
if "fc1" and "experts" not in key:
lowerCamelCase_ =key.replace('''.fc1.''' , '''.ffn.fc1.''' )
if ".encoder_attn." in key:
lowerCamelCase_ =key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
if "encoder_attn_layer_norm" in key:
lowerCamelCase_ =key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
if "final_layer_norm" in key:
lowerCamelCase_ =key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
lowerCamelCase_ =state_dict[old_key]
return new_dict
def a_ ( __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : str = WEIGHTS_NAME ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =0
os.makedirs(__snake_case , exist_ok=__snake_case )
for expert in range(__snake_case ):
lowerCamelCase_ =switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(__snake_case ):
lowerCamelCase_ =torch.load(__snake_case )['''model''']
remove_ignore_keys_(__snake_case )
lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case )
lowerCamelCase_ =os.path.join(
__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) )
torch.save(__snake_case , __snake_case )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__snake_case )[0]].dtype )
# Add the last block
lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) )
lowerCamelCase_ =torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
remove_ignore_keys_(__snake_case )
lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case )
lowerCamelCase_ =shared_weights['''decoder.embed_tokens.weight''']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__snake_case ) == 1:
lowerCamelCase_ =os.path.join(__snake_case , __snake_case )
torch.save(__snake_case , __snake_case )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__snake_case , __snake_case )
# Otherwise, let's build the index
lowerCamelCase_ ={}
for idx, shard in enumerate(__snake_case ):
lowerCamelCase_ =weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-{len(__snake_case ):05d}.bin''' )
lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) )
for key in shard:
lowerCamelCase_ =shard_file
# Add the metadata
lowerCamelCase_ ={'''total_size''': total_size}
lowerCamelCase_ ={'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(__snake_case , __snake_case ) , '''w''' , encoding='''utf-8''' ) as f:
lowerCamelCase_ =json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + '''\n'''
f.write(__snake_case )
return metadata, index
if __name__ == "__main__":
a_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
a_ : Tuple = parser.parse_args()
a_ , a_ : int = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_28,
args.dtype,
)
a_ : Tuple = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
)
config.save_pretrained(args.pytorch_dump_folder_path)
a_ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
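# Hedged usage sketch: run the conversion end-to-end (the script name and the
# paths below are placeholders, not values from this repo):
#
#   python convert_nllb_moe_checkpoint.py \
#       --nllb_moe_checkpoint_path /ckpts/model_moe_54b/checkpoint_2_300000 \
#       --dtype float32 \
#       --pytorch_dump_folder_path /ckpts/hf-converted-moe-54b
#
# The per-expert fairseq shards are renamed and re-sharded on the fly, then a
# NllbMoeConfig/NllbMoeModel pair is saved into the dump folder.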
| 6 | 1 |
'''simple docstring'''
def a_ ( nums : list[int] ) -> float:
    """simple docstring"""
    if not nums: # Makes sure that the list is not empty
        raise ValueError('''List is empty''' )
    average =sum(nums ) / len(nums ) # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
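    # Worked example: for [1, 2, 3, 4] the mean is 2.5 and the absolute
    # deviations are 1.5, 0.5, 0.5 and 1.5, so the result is 4.0 / 4 = 1.0.
    print(a_([1, 2, 3, 4]))  # 1.0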
| 6 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : int =['image_processor', 'tokenizer']
lowercase : int ='LayoutLMv2ImageProcessor'
lowercase : Any =('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', lowerCAmelCase, )
lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase, lowerCAmelCase )
def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = True, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=lowerCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =[text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCamelCase_ =features['''words''']
lowerCamelCase_ =self.tokenizer(
text=text if text is not None else features['''words'''], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['''boxes'''], word_labels=lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, stride=lowerCAmelCase, pad_to_multiple_of=lowerCAmelCase, return_token_type_ids=lowerCAmelCase, return_attention_mask=lowerCAmelCase, return_overflowing_tokens=lowerCAmelCase, return_special_tokens_mask=lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, return_length=lowerCAmelCase, verbose=lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase, )
# add pixel values
lowerCamelCase_ =features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
lowerCamelCase_ =self.get_overflowing_images(lowerCAmelCase, encoded_inputs['''overflow_to_sample_mapping'''] )
lowerCamelCase_ =images
return encoded_inputs
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[]
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(lowerCAmelCase )} and {len(lowerCAmelCase )}''' )
return images_with_overflow
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, )
return self.image_processor_class
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, )
return self.image_processor
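# Hedged usage sketch: the class above is registered as LayoutXLMProcessor in
# transformers. With apply_ocr=False on the image processor, the caller
# supplies the words and boxes (checkpoint download required):
#
#   from transformers import LayoutXLMProcessor
#
#   processor = LayoutXLMProcessor.from_pretrained(
#       "microsoft/layoutxlm-base", apply_ocr=False
#   )
#   encoding = processor(
#       image, text=["hello", "world"], boxes=[[1, 2, 3, 4], [5, 6, 7, 8]],
#       return_tensors="pt",
#   )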
| 6 | 1 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def a_ ( __snake_case : int ) -> typing.Counter[int]:
"""simple docstring"""
lowerCamelCase_ =Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(__snake_case , max_perimeter + 1 ):
lowerCamelCase_ =(base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__snake_case ):
lowerCamelCase_ =int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def a_ ( __snake_case : int = 1000 ) -> int:
"""simple docstring"""
lowerCamelCase_ =pythagorean_triple(__snake_case )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(F"""Perimeter {solution()} has maximum solutions""")
| 6 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : Union[str, Any] =VQModel
lowercase : Union[str, Any] ='sample'
@property
def lowercase__ ( self, lowerCAmelCase=(32, 32) ):
"""simple docstring"""
lowerCamelCase_ =4
lowerCamelCase_ =3
lowerCamelCase_ =floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase )
return {"sample": image}
@property
def lowercase__ ( self ):
"""simple docstring"""
return (3, 32, 32)
@property
def lowercase__ ( self ):
"""simple docstring"""
return (3, 32, 32)
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ={
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 3,
}
lowerCamelCase_ =self.dummy_input
return init_dict, inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''', output_loading_info=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ), 0 )
model.to(lowerCAmelCase )
lowerCamelCase_ =model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''' )
model.to(lowerCAmelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
lowerCamelCase_ =torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size )
lowerCamelCase_ =image.to(lowerCAmelCase )
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase ).sample
lowerCamelCase_ =output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowerCamelCase_ =torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3] )
# fmt: on
self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) )
| 6 | 1 |
'''simple docstring'''
from __future__ import annotations
def a_ ( __snake_case : int , __snake_case : int ) -> list[list[int]]:
"""simple docstring"""
lowerCamelCase_ =[]
create_all_state(1 , __snake_case , __snake_case , [] , __snake_case )
return result
def a_ ( __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : list[int] , __snake_case : list[list[int]] , ) -> None:
"""simple docstring"""
if level == 0:
total_list.append(current_list[:] )
return
for i in range(__snake_case , total_number - level + 2 ):
current_list.append(__snake_case )
create_all_state(i + 1 , __snake_case , level - 1 , __snake_case , __snake_case )
current_list.pop()
def a_ ( __snake_case : list[list[int]] ) -> None:
"""simple docstring"""
for i in total_list:
print(*__snake_case )
if __name__ == "__main__":
a_ : Tuple = 4
a_ : Union[str, Any] = 2
a_ : Tuple = generate_all_combinations(n, k)
print_all_state(total_list)
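    # Standalone cross-check: the backtracking above enumerates k-combinations
    # of {1, ..., n} in the same lexicographic order as itertools.combinations;
    # for n=4, k=2 that is the 6 pairs [1,2], [1,3], [1,4], [2,3], [2,4], [3,4].
    from itertools import combinations

    print([list(c) for c in combinations(range(1, 4 + 1), 2)])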
| 6 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
a_ : List[Any] = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
a_ : List[Any] = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
a_ : Any = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
    Note that answer_start values are not taken into account when computing the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def lowercase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ), codebase_urls=['''https://www.atticusprojectai.org/cuad'''], reference_urls=['''https://www.atticusprojectai.org/cuad'''], )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ ={prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
lowerCamelCase_ =[
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
lowerCamelCase_ =evaluate(dataset=lowerCAmelCase, predictions=lowerCAmelCase )
return score
| 6 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def a_ ( __snake_case : float , __snake_case : float , __snake_case : float ) -> tuple:
"""simple docstring"""
lowerCamelCase_ =namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
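# A minimal self-contained sketch of the same idea (the helper name
# `electric_power` and its signature are assumptions, not part of the module
# above): exactly one of the three quantities is passed as 0 and is recovered
# from the relation P = V * I.
def electric_power(voltage: float, current: float, power: float) -> float:
    if voltage == 0:
        return power / current  # V = P / I
    if current == 0:
        return power / voltage  # I = P / V
    return voltage * current  # P = V * I
# electric_power(0, 2, 4) == 2.0 and electric_power(2, 2, 0) == 4.0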
| 6 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
a_ : Tuple = logging.get_logger(__name__)
a_ : int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a_ : Tuple = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
a_ : Union[str, Any] = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
a_ : str = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
a_ : int = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_12,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_12,
}
a_ : List[Any] = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_12,
"""facebook/dpr-question_encoder-multiset-base""": 5_12,
}
a_ : Optional[Any] = {
"""facebook/dpr-reader-single-nq-base""": 5_12,
"""facebook/dpr-reader-multiset-base""": 5_12,
}
a_ : Optional[int] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
a_ : List[str] = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
a_ : Dict = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[Any] =VOCAB_FILES_NAMES
lowercase : Any =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[Any] =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase : Dict =DPRContextEncoderTokenizer
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[int] =VOCAB_FILES_NAMES
lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase : List[Any] =DPRQuestionEncoderTokenizer
a_ : Union[str, Any] = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
a_ : Dict = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
a_ : Dict = R"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
        - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
          is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(lowerCamelCase__ )
class __UpperCamelCase :
def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
elif titles is None or texts is None:
lowerCamelCase_ =titles if texts is None else texts
return super().__call__(
lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
lowerCamelCase_ =titles if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [titles]
lowerCamelCase_ =texts if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [texts]
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =questions if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [questions] * n_passages
assert len(lowerCAmelCase ) == len(
        lowerCAmelCase ), f'''There should be as many titles as texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.'''
lowerCamelCase_ =super().__call__(lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
lowerCamelCase_ =super().__call__(lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
lowerCamelCase_ ={
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCAmelCase, lowerCAmelCase )
]
}
if return_attention_mask is not False:
lowerCamelCase_ =[]
            for input_ids in encoded_inputs['''input_ids''']:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCamelCase_ =attention_mask
return self.pad(lowerCAmelCase, padding=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 16, lowerCAmelCase = 64, lowerCAmelCase = 4, ):
"""simple docstring"""
lowerCamelCase_ =reader_input['''input_ids''']
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =reader_output[:3]
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =sorted(range(lowerCAmelCase ), reverse=lowerCAmelCase, key=relevance_logits.__getitem__ )
lowerCamelCase_ =[]
for doc_id in sorted_docs:
lowerCamelCase_ =list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCamelCase_ =sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCamelCase_ =sequence_ids.index(self.pad_token_id )
else:
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCAmelCase, top_spans=lowerCAmelCase, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCAmelCase, start_index=lowerCAmelCase, end_index=lowerCAmelCase, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) )
if len(lowerCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =[]
for start_index, start_score in enumerate(lowerCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        lowerCamelCase_ =sorted(lowerCAmelCase, key=lambda x : x[1], reverse=lowerCAmelCase )
lowerCamelCase_ =[]
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
lowerCamelCase_ =end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
lowercase : int =VOCAB_FILES_NAMES
lowercase : Tuple =READER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Tuple =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] =READER_PRETRAINED_INIT_CONFIGURATION
lowercase : int =['input_ids', 'attention_mask']
lowercase : Dict =DPRReaderTokenizer
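# Hedged usage sketch (not part of this module; it mirrors the upstream
# transformers documentation for the reader tokenizer, which the last class
# above corresponds to):
# tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
# encoded_inputs = tokenizer(
#     questions=["What is love?"],
#     titles=["Haddaway"],
#     texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#     return_tensors="pt",
# )
# encoded_inputs["input_ids"] then has shape (n_passages, sequence_length),
# laid out as [CLS] question [SEP] title [SEP] text per the docstring above.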
| 6 | 1 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
a_ : List[Any] = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=1 ):
"""simple docstring"""
lowerCamelCase_ =tokenizer
lowerCamelCase_ =dataset
lowerCamelCase_ =len(lowerCAmelCase ) if n_tasks is None else n_tasks
lowerCamelCase_ =n_copies
def __iter__( self ):
"""simple docstring"""
lowerCamelCase_ =[]
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
lowerCamelCase_ =self.tokenizer(lowerCAmelCase, padding=lowerCAmelCase, return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =start_length
lowerCamelCase_ =eof_strings
lowerCamelCase_ =tokenizer
def __call__( self, lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase_ =[]
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCAmelCase )
def a_ ( __snake_case : List[Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =re.split('''(%s)''' % '''|'''.join(__snake_case ) , __snake_case )
# last string should be ""
return "".join(string_list[:-2] )
def a_ ( __snake_case : str , __snake_case : Any , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : Optional[int]=20 , **__snake_case : int ) -> Any:
"""simple docstring"""
lowerCamelCase_ =defaultdict(__snake_case ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__snake_case ) ):
with torch.no_grad():
lowerCamelCase_ =batch['''ids'''].shape[-1]
lowerCamelCase_ =accelerator.unwrap_model(__snake_case ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__snake_case , **__snake_case )
# each task is generated batch_size times
lowerCamelCase_ =batch['''task_id'''].repeat(__snake_case )
lowerCamelCase_ =accelerator.pad_across_processes(
__snake_case , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase_, lowerCamelCase_ =accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase_ =generated_tokens.cpu().numpy()
lowerCamelCase_ =generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__snake_case , __snake_case ):
gen_token_dict[task].append(__snake_case )
lowerCamelCase_ =[[] for _ in range(__snake_case )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase_ =tokenizer.decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )
code_gens[task].append(remove_last_block(__snake_case ) )
return code_gens
def a_ ( ) -> Any:
"""simple docstring"""
# Setup configuration
lowerCamelCase_ =HfArgumentParser(__snake_case )
lowerCamelCase_ =parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase_ =args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase_ ='''false'''
if args.num_workers is None:
lowerCamelCase_ =multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase_ =Accelerator()
set_seed(args.seed , device_specific=__snake_case )
# Load model and tokenizer
lowerCamelCase_ =AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase_ =tokenizer.eos_token
lowerCamelCase_ =AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase_ ={
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __snake_case , __snake_case )] ),
}
# Load evaluation dataset and metric
lowerCamelCase_ =load_dataset('''openai_humaneval''' )
lowerCamelCase_ =load_metric('''code_eval''' )
lowerCamelCase_ =args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
lowerCamelCase_ =args.n_samples // args.batch_size
lowerCamelCase_ =TokenizedDataset(__snake_case , human_eval['''test'''] , n_copies=__snake_case , n_tasks=__snake_case )
    # do not confuse args.batch_size, which is actually num_return_sequences, with the dataloader batch size
lowerCamelCase_ =DataLoader(__snake_case , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase_ =code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
lowerCamelCase_, lowerCamelCase_ =accelerator.prepare(__snake_case , __snake_case )
lowerCamelCase_ =complete_code(
__snake_case , __snake_case , __snake_case , __snake_case , n_tasks=__snake_case , batch_size=args.batch_size , **__snake_case , )
if accelerator.is_main_process:
lowerCamelCase_ =[]
for task in tqdm(range(__snake_case ) ):
lowerCamelCase_ =human_eval['''test'''][task]['''test''']
lowerCamelCase_ =F'''check({human_eval['test'][task]['entry_point']})'''
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase_, lowerCamelCase_ =code_eval_metric.compute(
references=__snake_case , predictions=__snake_case , num_workers=args.num_workers )
print(F'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(__snake_case , __snake_case )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
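# Illustrative behaviour of the truncation helper above (upstream it is named
# `remove_last_block`): a generation is split on the EOF_STRINGS markers and
# everything from the last top-level marker onwards is dropped, e.g.
# "def f():\n    return 1\nclass Foo:"  ->  "def f():\n    return 1"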
| 6 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def a_ ( ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ ={
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
}
lowerCamelCase_ =Dataset.from_dict(__snake_case )
return dataset
class __UpperCamelCase ( lowerCamelCase__ ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =get_dataset()
lowerCamelCase_ =make_duplicate_clusters(lowerCAmelCase, 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ), 2 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =get_dataset()
lowerCamelCase_, lowerCamelCase_ =deduplicate_dataset(lowerCAmelCase )
self.assertEqual(len(lowerCAmelCase ), 2 )
print(lowerCAmelCase )
self.assertEqual(duplicate_clusters[0][0]['''copies'''], 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''], lowerCAmelCase )
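# Note (illustrative reading of the fixture above): 0.85 is the MinHash
# Jaccard-similarity threshold, so the two "a "-repetition files are expected
# to land in one duplicate cluster while the "b "-repetition file stays out,
# which is what these tests assert.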
| 6 | 1 |
'''simple docstring'''
import argparse
a_ : int = """docs/source/_static/js/custom.js"""
def a_ ( __snake_case : Tuple ) -> List[str]:
"""simple docstring"""
with open(__snake_case , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCamelCase_ =f.readlines()
lowerCamelCase_ =0
# First let's put the right version
while not lines[index].startswith('''const stableVersion =''' ):
index += 1
lowerCamelCase_ =F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith('''const versionMapping = {''' ):
index += 1
# We go until the end
while not lines[index].startswith('''}''' ):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(__snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(__snake_case )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
a_ : str = parser.parse_args()
update_custom_js(args.version)
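# Illustrative effect for `--version 4.27.0` (the version string is only an
# example): the script rewrites the stable-version line to
#   const stableVersion = "v4.27.0"
# and appends '"v4.27.0": "v4.27.0",' to the versionMapping block of
# docs/source/_static/js/custom.js.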
| 6 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
a_ : Any = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 6 | 1 |
'''simple docstring'''
from __future__ import annotations
def a_ ( __snake_case : list[int] , __snake_case : int ) -> list[int]:
"""simple docstring"""
lowerCamelCase_ =0
lowerCamelCase_ =len(__snake_case ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
lowerCamelCase_ =i + 1
else:
lowerCamelCase_ =j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 6 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def a_ ( __snake_case : int = 150_0000 ) -> int:
"""simple docstring"""
lowerCamelCase_ =defaultdict(__snake_case )
lowerCamelCase_ =2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , __snake_case , 2 ):
if gcd(__snake_case , __snake_case ) > 1:
continue
lowerCamelCase_ =2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__snake_case , limit + 1 , __snake_case ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 6 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a_ : int = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = ["""CLIPFeatureExtractor"""]
a_ : List[str] = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
a_ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
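# Illustrative effect of the _LazyModule pattern above: heavy submodules are
# only imported on first attribute access, e.g. (module path assumed)
# import transformers.models.clip as clip  # cheap, nothing heavy imported yet
# clip.CLIPModel                           # first access triggers the torch-backed import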
| 6 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ : Tuple = 16
a_ : Optional[int] = 32
def a_ ( __snake_case : Accelerator , __snake_case : int = 16 ) -> str:
"""simple docstring"""
lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowerCamelCase_ =load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__snake_case : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ =datasets.map(
__snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ =tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__snake_case : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ =16
elif accelerator.mixed_precision != "no":
lowerCamelCase_ =8
else:
lowerCamelCase_ =None
return tokenizer.pad(
__snake_case , padding='''longest''' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowerCamelCase_ =DataLoader(
tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
lowerCamelCase_ =DataLoader(
tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a_ : Tuple = mocked_dataloaders # noqa: F811
def a_ ( __snake_case : List[str] , __snake_case : Tuple ) -> Optional[Any]:
"""simple docstring"""
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __snake_case ) == "1":
lowerCamelCase_ =2
# Initialize accelerator
lowerCamelCase_ =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ =config['''lr''']
lowerCamelCase_ =int(config['''num_epochs'''] )
lowerCamelCase_ =int(config['''seed'''] )
lowerCamelCase_ =int(config['''batch_size'''] )
lowerCamelCase_ =evaluate.load('''glue''' , '''mrpc''' )
# New Code #
    # We can now define an inner training loop function. It should take a batch size as its only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__snake_case )
def inner_training_loop(__snake_case : Union[str, Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ =model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ =AdamW(params=model.parameters() , lr=__snake_case )
lowerCamelCase_, lowerCamelCase_ =get_dataloaders(__snake_case , __snake_case )
# Instantiate scheduler
lowerCamelCase_ =get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Now we train the model
for epoch in range(__snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_ =model(**__snake_case )
lowerCamelCase_ =outputs.loss
accelerator.backward(__snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ =model(**__snake_case )
lowerCamelCase_ =outputs.logits.argmax(dim=-1 )
lowerCamelCase_, lowerCamelCase_ =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
lowerCamelCase_ =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __snake_case )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def a_ ( ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=__snake_case , default=__snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowerCamelCase_ =parser.parse_args()
lowerCamelCase_ ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
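# Minimal standalone sketch of the decorator used above (illustrative): on an
# out-of-memory error, `find_executable_batch_size` halves the batch size and
# re-invokes the wrapped function until it fits.
# from accelerate.utils import find_executable_batch_size
# @find_executable_batch_size(starting_batch_size=128)
# def train(batch_size):
#     ...  # build dataloaders with `batch_size` and run the loop
# train()  # called with no argument; the decorator supplies batch_size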
| 6 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ : Dict = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
a_ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 6 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
a_ : List[str] = """src/diffusers"""
# Matches is_xxx_available()
a_ : int = re.compile(R"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
a_ : List[str] = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
a_ : Optional[Any] = """
{0} = None
"""
a_ : List[Any] = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
a_ : Optional[Any] = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def a_ ( __snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =_re_backend.findall(__snake_case )
if len(__snake_case ) == 0:
return None
return "_and_".join(__snake_case )
def a_ ( ) -> Optional[int]:
"""simple docstring"""
with open(os.path.join(__snake_case , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCamelCase_ =f.readlines()
# Get to the point we do the actual imports for type checking
lowerCamelCase_ =0
lowerCamelCase_ ={}
# Go through the end of the file
while line_index < len(__snake_case ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowerCamelCase_ =find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('''else:''' ):
line_index += 1
line_index += 1
lowerCamelCase_ =[]
# Until we unindent, add backend objects to the list
while line_index < len(__snake_case ) and len(lines[line_index] ) > 1:
lowerCamelCase_ =lines[line_index]
lowerCamelCase_ =_re_single_line_import.search(__snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__snake_case ) > 0:
lowerCamelCase_ =objects
else:
line_index += 1
return backend_specific_objects
def a_ ( __snake_case : Dict , __snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(__snake_case )
elif name.islower():
return DUMMY_FUNCTION.format(__snake_case , __snake_case )
else:
return DUMMY_CLASS.format(__snake_case , __snake_case )
def a_ ( __snake_case : Tuple=None ) -> List[str]:
"""simple docstring"""
if backend_specific_objects is None:
lowerCamelCase_ =read_init()
    # Handle the special correspondence from a backend name to the module name used in the matching requires_<modulename> function
lowerCamelCase_ ={}
for backend, objects in backend_specific_objects.items():
lowerCamelCase_ ='''[''' + ''', '''.join(F'''"{b}"''' for b in backend.split('''_and_''' ) ) + ''']'''
lowerCamelCase_ ='''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__snake_case , __snake_case ) for o in objects] )
lowerCamelCase_ =dummy_file
return dummy_files
def a_ ( __snake_case : Dict=False ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =create_dummy_files()
    # Special correspondence from a backend name to the shortcut used in utils/dummy_xxx_objects.py
lowerCamelCase_ ={'''torch''': '''pt'''}
# Locate actual dummy modules and read their content.
lowerCamelCase_ =os.path.join(__snake_case , '''utils''' )
lowerCamelCase_ ={
backend: os.path.join(__snake_case , F'''dummy_{short_names.get(__snake_case , __snake_case )}_objects.py''' )
for backend in dummy_files.keys()
}
lowerCamelCase_ ={}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__snake_case ):
with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCamelCase_ =f.read()
else:
lowerCamelCase_ =''''''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py as the main '''
'''__init__ has new objects.''' )
with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'''The main __init__ has objects that are not present in '''
F'''diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py. Run `make fix-copies` '''
'''to fix this.''' )
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
a_ : Tuple = parser.parse_args()
check_dummies(args.fix_and_overwrite)
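# Illustrative output of create_dummy_object for a class gated on the torch
# backend (the class name "UNet2DModel" is only an example), as produced from
# the DUMMY_CLASS template above:
#
# class UNet2DModel(metaclass=DummyObject):
#     _backends = ["torch"]
#
#     def __init__(self, *args, **kwargs):
#         requires_backends(self, ["torch"])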
| 6 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def a_ ( __snake_case : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =[
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__snake_case , __snake_case )
def a_ ( __snake_case : List[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =emb.weight.shape
lowerCamelCase_ =nn.Linear(__snake_case , __snake_case , bias=__snake_case )
lowerCamelCase_ =emb.weight.data
return lin_layer
def a_ ( __snake_case : Union[str, Any] , __snake_case : Tuple=None ) -> Dict:
"""simple docstring"""
lowerCamelCase_ ={}
for old_key in state_dict.keys():
lowerCamelCase_ =old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
lowerCamelCase_ =key.replace('''moe_layer.experts.0''' , F'''ffn.experts.expert_{expert_idx}''' )
else:
lowerCamelCase_ =key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
if "gate" in key:
lowerCamelCase_ =key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
if "fc2" and "experts" not in key:
lowerCamelCase_ =key.replace('''.fc2.''' , '''.ffn.fc2.''' )
if "fc1" and "experts" not in key:
lowerCamelCase_ =key.replace('''.fc1.''' , '''.ffn.fc1.''' )
if ".encoder_attn." in key:
lowerCamelCase_ =key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
if "encoder_attn_layer_norm" in key:
lowerCamelCase_ =key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
if "final_layer_norm" in key:
lowerCamelCase_ =key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
lowerCamelCase_ =state_dict[old_key]
return new_dict
def a_ ( __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : str = WEIGHTS_NAME ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =0
os.makedirs(__snake_case , exist_ok=__snake_case )
for expert in range(__snake_case ):
lowerCamelCase_ =switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(__snake_case ):
lowerCamelCase_ =torch.load(__snake_case )['''model''']
remove_ignore_keys_(__snake_case )
lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case )
lowerCamelCase_ =os.path.join(
__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) )
torch.save(__snake_case , __snake_case )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__snake_case )[0]].dtype )
# Add the last block
lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) )
lowerCamelCase_ =torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
remove_ignore_keys_(__snake_case )
lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case )
lowerCamelCase_ =shared_weights['''decoder.embed_tokens.weight''']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__snake_case ) == 1:
lowerCamelCase_ =os.path.join(__snake_case , __snake_case )
torch.save(__snake_case , __snake_case )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__snake_case , __snake_case )
# Otherwise, let's build the index
lowerCamelCase_ ={}
for idx, shard in enumerate(__snake_case ):
lowerCamelCase_ =weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-{len(__snake_case ):05d}.bin''' )
lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) )
for key in shard:
lowerCamelCase_ =shard_file
# Add the metadata
lowerCamelCase_ ={'''total_size''': total_size}
lowerCamelCase_ ={'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(__snake_case , __snake_case ) , '''w''' , encoding='''utf-8''' ) as f:
lowerCamelCase_ =json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + '''\n'''
f.write(__snake_case )
return metadata, index
if __name__ == "__main__":
a_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
a_ : Tuple = parser.parse_args()
a_ , a_ : int = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_28,
args.dtype,
)
a_ : Tuple = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
)
config.save_pretrained(args.pytorch_dump_folder_path)
a_ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
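# Illustrative shape of the sharding index written above (names and sizes are
# made up):
# {
#   "metadata": {"total_size": 123456789},
#   "weight_map": {
#     "ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00129.bin",
#     ...
#   }
# }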
| 6 |
'''simple docstring'''
a_ : List[Any] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def a_ ( __snake_case : int ) -> int:
"""simple docstring"""
lowerCamelCase_ =0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# Two chains are formed.
# One ends with 89; its member 58, when declared first, requires the
# fewest iterations for all the members to be checked.
# The other one ends with 1 and has only the single element 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed the dictionary to an array to speed up the solution
a_ : list[bool | None] = [None] * 10_00_00_00
a_ : List[Any] = True
a_ : Optional[Any] = False
def a_ ( __snake_case : int ) -> bool:
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowerCamelCase_ =chain(next_number(__snake_case ) )
lowerCamelCase_ =number_chain
while number < 1000_0000:
lowerCamelCase_ =number_chain
number *= 10
return number_chain
def a_ ( __snake_case : int = 1000_0000 ) -> int:
"""simple docstring"""
for i in range(1 , __snake_case ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 6 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =tempfile.mkdtemp()
lowerCamelCase_ =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
lowerCamelCase_ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ ={
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
'''do_convert_rgb''': True,
}
lowerCamelCase_ =os.path.join(self.tmpdirname, lowerCAmelCase )
with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp:
json.dump(lowerCAmelCase, lowerCAmelCase )
def lowercase__ ( self, **lowerCAmelCase ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase )
def lowercase__ ( self, **lowerCAmelCase ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase )
def lowercase__ ( self, **lowerCAmelCase ):
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =[np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
lowerCamelCase_ =[Image.fromarray(np.moveaxis(lowerCAmelCase, 0, -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_rust_tokenizer()
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =ChineseCLIPProcessor(tokenizer=lowerCAmelCase, image_processor=lowerCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ =ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCAmelCase )
lowerCamelCase_ =ChineseCLIPProcessor(tokenizer=lowerCAmelCase, image_processor=lowerCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ =ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, lowerCAmelCase )
self.assertIsInstance(processor_fast.tokenizer, lowerCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, lowerCAmelCase )
self.assertIsInstance(processor_fast.image_processor, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ =self.get_tokenizer(cls_token='''(CLS)''', sep_token='''(SEP)''' )
lowerCamelCase_ =self.get_image_processor(do_normalize=lowerCAmelCase )
lowerCamelCase_ =ChineseCLIPProcessor.from_pretrained(
self.tmpdirname, cls_token='''(CLS)''', sep_token='''(SEP)''', do_normalize=lowerCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =ChineseCLIPProcessor(tokenizer=lowerCAmelCase, image_processor=lowerCAmelCase )
lowerCamelCase_ =self.prepare_image_inputs()
lowerCamelCase_ =image_processor(lowerCAmelCase, return_tensors='''np''' )
lowerCamelCase_ =processor(images=lowerCAmelCase, return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =ChineseCLIPProcessor(tokenizer=lowerCAmelCase, image_processor=lowerCAmelCase )
lowerCamelCase_ ='''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ =processor(text=lowerCAmelCase )
lowerCamelCase_ =tokenizer(lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =ChineseCLIPProcessor(tokenizer=lowerCAmelCase, image_processor=lowerCAmelCase )
lowerCamelCase_ ='''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ =self.prepare_image_inputs()
lowerCamelCase_ =processor(text=lowerCAmelCase, images=lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ), ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase ):
processor()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =ChineseCLIPProcessor(tokenizer=lowerCAmelCase, image_processor=lowerCAmelCase )
lowerCamelCase_ =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ =processor.batch_decode(lowerCAmelCase )
lowerCamelCase_ =tokenizer.batch_decode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =ChineseCLIPProcessor(tokenizer=lowerCAmelCase, image_processor=lowerCAmelCase )
lowerCamelCase_ ='''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ =self.prepare_image_inputs()
lowerCamelCase_ =processor(text=lowerCAmelCase, images=lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
| 6 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def a_ ( __snake_case : Tuple ) -> str:
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class __UpperCamelCase ( lowerCamelCase__ ):
@staticmethod
def lowercase__ ( lowerCAmelCase ):
"""simple docstring"""
        lowerCamelCase_ =lowerCAmelCase.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''', type=lowerCAmelCase, default=lowerCAmelCase, help='''Path to location to store the models''' )
download_parser.add_argument(
            '''--force''', action='''store_true''', help='''Force the model to be downloaded even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''', action='''store_true''', help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''', )
download_parser.add_argument('''model''', type=lowerCAmelCase, help='''Name of the model to download''' )
download_parser.set_defaults(func=lowerCAmelCase )
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =model
lowerCamelCase_ =cache
lowerCamelCase_ =force
lowerCamelCase_ =trust_remote_code
def lowercase__ ( self ):
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
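# Intended CLI usage (a sketch; this assumes the command above is registered with the
# `transformers-cli` entry point, as the matching command is in transformers):
#   transformers-cli download bert-base-uncased --cache-dir ./models --force
# which pre-downloads both the model weights and the tokenizer files into the cache.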
| 6 | 1 |
'''simple docstring'''
import numpy as np
from PIL import Image
def a_ ( __snake_case : np.ndarray , __snake_case : int , __snake_case : int ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ =np.array(__snake_case )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
lowerCamelCase_ =0
lowerCamelCase_ =0
lowerCamelCase_ =0
lowerCamelCase_ =0
# compute the shape of the output matrix
lowerCamelCase_ =(arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
lowerCamelCase_ =np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
lowerCamelCase_ =np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCamelCase_ =0
lowerCamelCase_ =0
return updated_arr
def a_ ( __snake_case : np.ndarray , __snake_case : int , __snake_case : int ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ =np.array(__snake_case )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
lowerCamelCase_ =0
lowerCamelCase_ =0
lowerCamelCase_ =0
lowerCamelCase_ =0
# compute the shape of the output matrix
lowerCamelCase_ =(arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
lowerCamelCase_ =np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
lowerCamelCase_ =int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCamelCase_ =0
lowerCamelCase_ =0
return updated_arr
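# A quick numeric sanity check (illustrative, assuming the placeholder assignments above
# write into updated_arr[mat_i][mat_j] as in the reference implementation):
#   mat = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
#   maxpooling(mat, size=2, stride=2)  # -> [[ 6.,  8.], [14., 16.]]
#   avgpooling(mat, size=2, stride=2)  # -> [[ 3.,  5.], [11., 13.]]
# Each 2x2 tile collapses to its max (or truncated integer average), halving both dimensions.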
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="""avgpooling""", verbose=True)
# Loading the image
a_ : int = Image.open("""path_to_image""")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 6 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
a_ : List[str] = logging.get_logger(__name__)
a_ : Optional[Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
a_ : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
lowercase : str =field(
default=lowerCamelCase__ , metadata={'help': 'Model type selected in the list: ' + ', '.join(lowerCamelCase__ )} )
lowercase : str =field(
default=lowerCamelCase__ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowercase : int =field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase : int =field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowercase : int =field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowercase : int =field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowercase : float =field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowercase : int =field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowercase : int =field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowercase : int =field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[Any] ='train'
lowercase : Any ='dev'
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : SquadDataTrainingArguments
lowercase : List[SquadFeatures]
lowercase : Split
lowercase : bool
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = Split.train, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = "pt", ):
"""simple docstring"""
lowerCamelCase_ =args
lowerCamelCase_ =is_language_sensitive
        lowerCamelCase_ =SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(lowerCAmelCase, lowerCAmelCase ):
try:
lowerCamelCase_ =Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
lowerCamelCase_ =mode
# Load data features from cache or dataset file
lowerCamelCase_ ='''v2''' if args.version_2_with_negative else '''v1'''
lowerCamelCase_ =os.path.join(
cache_dir if cache_dir is not None else args.data_dir, f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ =cached_features_file + '''.lock'''
with FileLock(lowerCAmelCase ):
if os.path.exists(lowerCAmelCase ) and not args.overwrite_cache:
lowerCamelCase_ =time.time()
lowerCamelCase_ =torch.load(lowerCAmelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ =self.old_features['''features''']
lowerCamelCase_ =self.old_features.get('''dataset''', lowerCAmelCase )
lowerCamelCase_ =self.old_features.get('''examples''', lowerCAmelCase )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
                        ''' a future run''' )
else:
if mode == Split.dev:
lowerCamelCase_ =self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ =self.processor.get_train_examples(args.data_dir )
lowerCamelCase_, lowerCamelCase_ =squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowerCAmelCase, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowerCAmelCase, )
lowerCamelCase_ =time.time()
torch.save(
{'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples}, lowerCAmelCase, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.features[i]
lowerCamelCase_ =torch.tensor(feature.input_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.attention_mask, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.token_type_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.cls_index, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.p_mask, dtype=torch.float )
lowerCamelCase_ =torch.tensor(feature.is_impossible, dtype=torch.float )
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''attention_mask''': attention_mask,
'''token_type_ids''': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'''is_impossible''': is_impossible} )
if self.is_language_sensitive:
            inputs.update({'''langs''': (torch.ones(input_ids.shape, dtype=torch.int64 ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ =torch.tensor(feature.start_position, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
return inputs
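# Intended use (a sketch; names assume the classes above are SquadDataTrainingArguments,
# Split and SquadDataset, as in transformers):
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="squad_data")
#   dataset = SquadDataset(args, tokenizer=tokenizer, mode=Split.train)
# The FileLock around the cache file ensures that, in distributed training, only the first
# process converts examples to features; the other processes block on the lock and then
# simply load the cached tensors.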
| 6 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Union[str, Any] =['image_processor', 'tokenizer']
lowercase : Any ='BlipImageProcessor'
lowercase : List[Any] =('BertTokenizer', 'BertTokenizerFast')
def __init__( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =False
super().__init__(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =self.image_processor
def __call__( self, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = True, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
lowerCamelCase_ =self.tokenizer
lowerCamelCase_ =self.tokenizer(
text=lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, stride=lowerCAmelCase, pad_to_multiple_of=lowerCAmelCase, return_attention_mask=lowerCAmelCase, return_overflowing_tokens=lowerCAmelCase, return_special_tokens_mask=lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, return_token_type_ids=lowerCAmelCase, return_length=lowerCAmelCase, verbose=lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase, )
return text_encoding
# add pixel_values
lowerCamelCase_ =self.image_processor(lowerCAmelCase, return_tensors=lowerCAmelCase )
if text is not None:
lowerCamelCase_ =self.tokenizer(
text=lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, stride=lowerCAmelCase, pad_to_multiple_of=lowerCAmelCase, return_attention_mask=lowerCAmelCase, return_overflowing_tokens=lowerCAmelCase, return_special_tokens_mask=lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, return_token_type_ids=lowerCAmelCase, return_length=lowerCAmelCase, verbose=lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase, )
else:
lowerCamelCase_ =None
if text_encoding is not None:
encoding_image_processor.update(lowerCAmelCase )
return encoding_image_processor
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.tokenizer.model_input_names
lowerCamelCase_ =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
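# Hypothetical usage (the model id is illustrative, assuming the class above is
# BlipProcessor from transformers):
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")
# Text-only calls return just the tokenizer's BatchEncoding; when images are given,
# pixel_values are returned, merged with the text encoding as in __call__ above.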
| 6 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : Any = logging.get_logger(__name__)
a_ : Optional[int] = """https://openaipublic.azureedge.net/jukebox/models/"""
a_ : Any = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def a_ ( __snake_case : int ) -> Any:
"""simple docstring"""
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
lowerCamelCase_ =key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
lowerCamelCase_ =key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
lowerCamelCase_ =key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
lowerCamelCase_ =key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
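# Example of a rename performed above (hypothetical key): replace_key("priors.0.y_emb.weight")
# returns "priors.0.metadata_embedding.weight". Block-index-sensitive renames (conv and resnet
# layers) are handled separately by the regexes in fix_jukebox_keys below.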
def a_ ( __snake_case : Dict , __snake_case : int , __snake_case : Dict , __snake_case : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ ={}
import re
lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_conv_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] )
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_encoder_block_conv_in.sub(__snake_case , __snake_case )
elif re_encoder_block_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] )
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_encoder_block_resnet.sub(__snake_case , __snake_case )
elif re_encoder_block_proj_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_proj_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
lowerCamelCase_ =re_encoder_block_proj_out.sub(__snake_case , __snake_case )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_conv_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_decoder_block_conv_out.sub(__snake_case , __snake_case )
elif re_decoder_block_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_decoder_block_resnet.sub(__snake_case , __snake_case )
elif re_decoder_block_proj_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_proj_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
lowerCamelCase_ =re_decoder_block_proj_in.sub(__snake_case , __snake_case )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_conv_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_prior_cond_conv_out.sub(__snake_case , __snake_case )
elif re_prior_cond_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_prior_cond_resnet.sub(__snake_case , __snake_case )
elif re_prior_cond_proj_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_proj_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
lowerCamelCase_ =re_prior_cond_proj_in.sub(__snake_case , __snake_case )
# keep original key
else:
lowerCamelCase_ =original_key
lowerCamelCase_ =replace_key(__snake_case )
if F'''{key_prefix}.{key}''' not in model_state_dict or key is None:
print(F'''failed converting {original_key} to {key}, does not match''' )
        # handle mismatched shape
elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape:
lowerCamelCase_ =model_state_dict[F'''{key_prefix}.{key}''']
print(F'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' )
lowerCamelCase_ =original_key
lowerCamelCase_ =original_key
lowerCamelCase_ =value
return new_dict
@torch.no_grad()
def a_ ( __snake_case : List[str]=None , __snake_case : Tuple=None ) -> Union[str, Any]:
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ):
lowerCamelCase_ =requests.get(F'''{PREFIX}{file}''' , allow_redirects=__snake_case )
os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=__snake_case )
open(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , '''wb''' ).write(r.content )
lowerCamelCase_ =MODEL_MAPPING[model_name.split('''/''' )[-1]]
lowerCamelCase_ =JukeboxConfig.from_pretrained(__snake_case )
lowerCamelCase_ =JukeboxModel(__snake_case )
lowerCamelCase_ =[]
lowerCamelCase_ ={}
for i, dict_name in enumerate(__snake_case ):
lowerCamelCase_ =torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )['''model''']
lowerCamelCase_ ={}
for k in old_dic.keys():
if k.endswith('''.b''' ):
lowerCamelCase_ =old_dic[k]
elif k.endswith('''.w''' ):
lowerCamelCase_ =old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
lowerCamelCase_ =old_dic[k]
else:
lowerCamelCase_ =old_dic[k]
lowerCamelCase_ ='''vqvae''' if i == 0 else F'''priors.{3 - i}'''
lowerCamelCase_ =fix_jukebox_keys(__snake_case , model.state_dict() , __snake_case , __snake_case )
weight_dict.append(__snake_case )
lowerCamelCase_ =weight_dict.pop(0 )
model.vqvae.load_state_dict(__snake_case )
for i in range(len(__snake_case ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
with open(F'''{pytorch_dump_folder_path}/mapping.json''' , '''w''' ) as txtfile:
json.dump(__snake_case , __snake_case )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__snake_case )
return weight_dict
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
a_ : Optional[int] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 6 | 1 |
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def a_ ( __snake_case : int , __snake_case : Any , __snake_case : Union[str, Any]=1e-12 ) -> Union[str, Any]:
"""simple docstring"""
    lowerCamelCase_ =jnp.divide(emb_1.T , jnp.clip(jnp.linalg.norm(emb_1 , axis=1 ) , a_min=eps ) ).T
    lowerCamelCase_ =jnp.divide(emb_2.T , jnp.clip(jnp.linalg.norm(emb_2 , axis=1 ) , a_min=eps ) ).T
    return jnp.matmul(norm_emb_1 , norm_emb_2.T )
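# Shape sketch: for emb_1 of shape (n, d) and emb_2 of shape (m, d), the function above
# L2-normalises each row and returns an (n, m) matrix of cosine similarities; clipping the
# norms at eps guards against division by zero for all-zero embeddings.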
class __UpperCamelCase ( nn.Module ):
lowercase : CLIPConfig
    lowercase : jnp.dtype =jnp.float32
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =FlaxCLIPVisionModule(self.config.vision_config )
lowerCamelCase_ =nn.Dense(self.config.projection_dim, use_bias=lowerCAmelCase, dtype=self.dtype )
lowerCamelCase_ =self.param('''concept_embeds''', jax.nn.initializers.ones, (17, self.config.projection_dim) )
lowerCamelCase_ =self.param(
'''special_care_embeds''', jax.nn.initializers.ones, (3, self.config.projection_dim) )
lowerCamelCase_ =self.param('''concept_embeds_weights''', jax.nn.initializers.ones, (17,) )
lowerCamelCase_ =self.param('''special_care_embeds_weights''', jax.nn.initializers.ones, (3,) )
def __call__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.vision_model(lowerCAmelCase )[1]
lowerCamelCase_ =self.visual_projection(lowerCAmelCase )
lowerCamelCase_ =jax_cosine_distance(lowerCAmelCase, self.special_care_embeds )
lowerCamelCase_ =jax_cosine_distance(lowerCAmelCase, self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
lowerCamelCase_ =0.0
lowerCamelCase_ =special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
lowerCamelCase_ =jnp.round(lowerCAmelCase, 3 )
lowerCamelCase_ =jnp.any(special_scores > 0, axis=1, keepdims=lowerCAmelCase )
# Use a lower threshold if an image has any special care concept
lowerCamelCase_ =is_special_care * 0.0_1
lowerCamelCase_ =cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
lowerCamelCase_ =jnp.round(lowerCAmelCase, 3 )
lowerCamelCase_ =jnp.any(concept_scores > 0, axis=1 )
return has_nsfw_concepts
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Tuple =CLIPConfig
lowercase : Union[str, Any] ='clip_input'
lowercase : List[str] =FlaxStableDiffusionSafetyCheckerModule
    def __init__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = 0, lowerCAmelCase = jnp.float32, lowerCAmelCase = True, **lowerCAmelCase, ):
"""simple docstring"""
if input_shape is None:
lowerCamelCase_ =(1, 224, 224, 3)
lowerCamelCase_ =self.module_class(config=lowerCAmelCase, dtype=lowerCAmelCase, **lowerCAmelCase )
super().__init__(lowerCAmelCase, lowerCAmelCase, input_shape=lowerCAmelCase, seed=lowerCAmelCase, dtype=lowerCAmelCase, _do_init=_do_init )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None ):
"""simple docstring"""
lowerCamelCase_ =jax.random.normal(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =jax.random.split(lowerCAmelCase )
lowerCamelCase_ ={'''params''': params_rng, '''dropout''': dropout_rng}
lowerCamelCase_ =self.module.init(lowerCAmelCase, lowerCAmelCase )['''params''']
return random_params
def __call__( self, lowerCAmelCase, lowerCAmelCase = None, ):
"""simple docstring"""
lowerCamelCase_ =jnp.transpose(lowerCAmelCase, (0, 2, 3, 1) )
return self.module.apply(
            {'''params''': params or self.params}, jnp.array(lowerCAmelCase, dtype=jnp.float32 ), rngs={}, )
| 6 |
'''simple docstring'''
def a_ ( __snake_case : int = 1000 ) -> int:
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =1, 1
lowerCamelCase_ =2
while True:
lowerCamelCase_ =0
        lowerCamelCase_ =f1 + f2
        lowerCamelCase_, lowerCamelCase_ =f2, f
        index += 1
        for _ in str(f ):
            i += 1
        if i == __snake_case:
            break
return index
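# For example, solution(3) should return 12, since F(12) = 144 is the first Fibonacci number
# with three digits; the default n=1000 reproduces Project Euler problem 25.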
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 6 | 1 |
'''simple docstring'''
def a_ ( __snake_case : str , __snake_case : str ) -> bool:
"""simple docstring"""
lowerCamelCase_ =len(__snake_case )
lowerCamelCase_ =len(__snake_case )
lowerCamelCase_ =[[False for _ in range(m + 1 )] for _ in range(n + 1 )]
lowerCamelCase_ =True
for i in range(__snake_case ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
lowerCamelCase_ =True
if a[i].islower():
lowerCamelCase_ =True
return dp[n][m]
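# Worked example (assuming the placeholder assignments above fill dp[i + 1][...] as in the
# usual "abbreviation" DP): a = "daBcd", b = "ABC" -> True, since capitalising 'a' and 'c'
# and deleting the remaining lowercase 'd's turns a into b; a = "dBcd", b = "ABC" -> False.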
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def a_ ( __snake_case : Any ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
F'''{test_file} instead.''' )
lowerCamelCase_ =components[-1]
if not test_fn.endswith('''py''' ):
raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' )
if not test_fn.startswith('''test_modeling_''' ):
raise ValueError(
F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
lowerCamelCase_ =components[:-1] + [test_fn.replace('''.py''' , '''''' )]
lowerCamelCase_ ='''.'''.join(__snake_case )
return test_module_path
def a_ ( __snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =get_module_path(__snake_case )
lowerCamelCase_ =importlib.import_module(__snake_case )
return test_module
def a_ ( __snake_case : Dict ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =get_test_module(__snake_case )
for attr in dir(__snake_case ):
if attr.endswith('''ModelTester''' ):
tester_classes.append(getattr(__snake_case , __snake_case ) )
# sort with class names
    return sorted(__snake_case , key=lambda x : x.__name__ )
def a_ ( __snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =get_test_module(__snake_case )
for attr in dir(__snake_case ):
lowerCamelCase_ =getattr(__snake_case , __snake_case )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
lowerCamelCase_ =getattr(__snake_case , '''all_model_classes''' , [] )
if len(__snake_case ) > 0:
test_classes.append(__snake_case )
# sort with class names
    return sorted(__snake_case , key=lambda x : x.__name__ )
def a_ ( __snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ =get_test_classes(__snake_case )
lowerCamelCase_ =set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
    return sorted(__snake_case , key=lambda x : x.__name__ )
def a_ ( __snake_case : str ) -> str:
"""simple docstring"""
lowerCamelCase_ =test_class()
if hasattr(__snake_case , '''setUp''' ):
test.setUp()
lowerCamelCase_ =None
if hasattr(__snake_case , '''model_tester''' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
lowerCamelCase_ =test.model_tester.__class__
return model_tester
def a_ ( __snake_case : Dict , __snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =get_test_classes(__snake_case )
lowerCamelCase_ =[]
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(__snake_case )
# sort with class names
    return sorted(__snake_case , key=lambda x : x.__name__ )
def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ =get_test_classes_for_model(__snake_case , __snake_case )
lowerCamelCase_ =[]
for test_class in test_classes:
lowerCamelCase_ =get_model_tester_from_test_class(__snake_case )
if tester_class is not None:
tester_classes.append(__snake_case )
# sort with class names
    return sorted(__snake_case , key=lambda x : x.__name__ )
def a_ ( __snake_case : Tuple ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =get_test_classes(__snake_case )
lowerCamelCase_ ={test_class: get_model_tester_from_test_class(__snake_case ) for test_class in test_classes}
return test_tester_mapping
def a_ ( __snake_case : Dict ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =get_model_classes(__snake_case )
lowerCamelCase_ ={
model_class: get_test_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes
}
return model_test_mapping
def a_ ( __snake_case : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =get_model_classes(__snake_case )
lowerCamelCase_ ={
model_class: get_tester_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes
}
return model_to_tester_mapping
def a_ ( __snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
    if isinstance(__snake_case , str ):
        return __snake_case
    elif isinstance(__snake_case , type ):
        return __snake_case.__name__
    elif isinstance(__snake_case , (list, tuple) ):
        return [to_json(x ) for x in __snake_case]
    elif isinstance(__snake_case , dict ):
        return {to_json(k ): to_json(v ) for k, v in __snake_case.items()}
    else:
        return __snake_case
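# Hypothetical usage sketch (the path is illustrative, assuming the helpers above keep their
# transformers names):
#   to_json(get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py"))
# yields a JSON-serialisable dict mapping each model class name in that test file to the
# names of its tester classes.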
| 6 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ : Dict = """▁"""
a_ : List[Any] = {"""vocab_file""": """spiece.model"""}
a_ : str = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}
}
a_ : str = {
"""google/pegasus-xsum""": 5_12,
}
a_ : Any = logging.get_logger(__name__)
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : int =VOCAB_FILES_NAMES
lowercase : List[str] =PRETRAINED_VOCAB_FILES_MAP
lowercase : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Tuple =['input_ids', 'attention_mask']
def __init__( self, lowerCAmelCase, lowerCAmelCase="<pad>", lowerCAmelCase="</s>", lowerCAmelCase="<unk>", lowerCAmelCase="<mask_2>", lowerCAmelCase="<mask_1>", lowerCAmelCase=None, lowerCAmelCase=103, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =offset
if additional_special_tokens is not None:
if not isinstance(lowerCAmelCase, lowerCAmelCase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowerCAmelCase )}, but is'''
f''' {type(lowerCAmelCase )}''' )
lowerCamelCase_ =(
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowerCAmelCase ), self.offset - 1 )
]
if len(set(lowerCAmelCase ) ) != len(lowerCAmelCase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
lowerCamelCase_ =additional_special_tokens_extended
else:
lowerCamelCase_ =[mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2, self.offset )]
lowerCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCAmelCase, unk_token=lowerCAmelCase, mask_token=lowerCAmelCase, pad_token=lowerCAmelCase, mask_token_sent=lowerCAmelCase, offset=lowerCAmelCase, additional_special_tokens=lowerCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase, )
lowerCamelCase_ =mask_token_sent
lowerCamelCase_ =vocab_file
lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase )
# add special tokens to encoder dict
lowerCamelCase_ ={
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1 )} )
lowerCamelCase_ ={v: k for k, v in self.encoder.items()}
@property
def lowercase__ ( self ):
"""simple docstring"""
return len(self.sp_model ) + self.offset
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ={self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowerCamelCase_ =self.__dict__.copy()
lowerCamelCase_ =None
return state
def __setstate__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowerCamelCase_ ={}
lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase, out_type=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
lowerCamelCase_ =self.sp_model.piece_to_id(lowerCAmelCase )
return sp_id + self.offset
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
lowerCamelCase_ =self.sp_model.IdToPiece(index - self.offset )
return token
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase ) + token
lowerCamelCase_ =[]
else:
current_sub_tokens.append(lowerCAmelCase )
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
def lowercase__ ( self, lowerCAmelCase=False ):
"""simple docstring"""
return 1
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(lowerCAmelCase )
elif token_ids_a is None:
return self._special_token_mask(lowerCAmelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ =os.path.join(
lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase, '''wb''' ) as fi:
lowerCamelCase_ =self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
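# Round-trip sketch (assuming the class above is PegasusTokenizer):
#   tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   ids = tok("Summarize this article.").input_ids   # ends with eos_token_id == 1
#   tok.save_pretrained("my_tokenizer")              # writes spiece.model via save_vocabulary above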
| 6 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : str =['speech']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''speech'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Any =['speech']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''speech'''] )
| 6 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
lowercase : Any =CTRLTokenizer
lowercase : Union[str, Any] =False
lowercase : Optional[int] =False
def lowercase__ ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase_ =['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) )
lowerCamelCase_ =['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
lowerCamelCase_ ={'''unk_token''': '''<unk>'''}
lowerCamelCase_ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + '''\n''' )
with open(self.merges_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase ) )
def lowercase__ ( self, **lowerCAmelCase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ ='''adapt react readapt apt'''
lowerCamelCase_ ='''adapt react readapt apt'''
return input_text, output_text
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
lowerCamelCase_ ='''adapt react readapt apt'''
lowerCamelCase_ ='''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
lowerCamelCase_ =tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =tokens + [tokenizer.unk_token]
lowerCamelCase_ =[0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ), lowerCAmelCase )
| 6 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[str] =['image_processor', 'tokenizer']
lowercase : Optional[int] ='AutoImageProcessor'
lowercase : List[str] ='AutoTokenizer'
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', lowerCAmelCase, )
lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =self.image_processor
lowerCamelCase_ =False
def __call__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase, **lowerCAmelCase )
lowerCamelCase_ =kwargs.pop('''images''', lowerCAmelCase )
lowerCamelCase_ =kwargs.pop('''text''', lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
lowerCamelCase_ =args[0]
lowerCamelCase_ =args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
lowerCamelCase_ =self.image_processor(lowerCAmelCase, *lowerCAmelCase, **lowerCAmelCase )
if text is not None:
lowerCamelCase_ =self.tokenizer(lowerCAmelCase, **lowerCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCamelCase_ =encodings['''input_ids''']
return inputs
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@contextmanager
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''' )
lowerCamelCase_ =True
lowerCamelCase_ =self.tokenizer
yield
lowerCamelCase_ =self.image_processor
lowerCamelCase_ =False
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False, lowerCAmelCase=None ):
"""simple docstring"""
if added_vocab is None:
lowerCamelCase_ =self.tokenizer.get_added_vocab()
lowerCamelCase_ ={}
while tokens:
lowerCamelCase_ =re.search(R'''<s_(.*?)>''', lowerCAmelCase, re.IGNORECASE )
if start_token is None:
break
lowerCamelCase_ =start_token.group(1 )
lowerCamelCase_ =re.search(Rf'''</s_{key}>''', lowerCAmelCase, re.IGNORECASE )
lowerCamelCase_ =start_token.group()
if end_token is None:
lowerCamelCase_ =tokens.replace(lowerCAmelCase, '''''' )
else:
lowerCamelCase_ =end_token.group()
lowerCamelCase_ =re.escape(lowerCAmelCase )
lowerCamelCase_ =re.escape(lowerCAmelCase )
lowerCamelCase_ =re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''', lowerCAmelCase, re.IGNORECASE )
if content is not None:
lowerCamelCase_ =content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
                        lowerCamelCase_ =self.token2json(lowerCAmelCase, is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase )
if value:
if len(lowerCAmelCase ) == 1:
lowerCamelCase_ =value[0]
lowerCamelCase_ =value
else: # leaf nodes
lowerCamelCase_ =[]
for leaf in content.split(R'''<sep/>''' ):
lowerCamelCase_ =leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowerCamelCase_ =leaf[1:-2] # for categorical special tokens
output[key].append(lowerCAmelCase )
if len(output[key] ) == 1:
lowerCamelCase_ =output[key][0]
lowerCamelCase_ =tokens[tokens.find(lowerCAmelCase ) + len(lowerCAmelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
            return [output] + self.token2json(tokens[6:], is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase )
if len(lowerCAmelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
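    # Parse sketch (a hypothetical Donut-style output string, assuming the placeholder
    # assignments above build the nested dict):
    #   "<s_menu><s_name>latte</s_name><s_price>5</s_price></s_menu>"
    # -> {"menu": {"name": "latte", "price": "5"}}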
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, )
return self.image_processor_class
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, )
return self.image_processor
| 6 | 1 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class __UpperCamelCase ( nn.Module ):
def __init__( self, lowerCAmelCase = 16, lowerCAmelCase = 88, lowerCAmelCase = None, lowerCAmelCase = 1, lowerCAmelCase = 0.0, lowerCAmelCase = 32, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = "geglu", lowerCAmelCase = None, ):
"""simple docstring"""
super().__init__()
lowerCamelCase_ =nn.ModuleList(
[
                Transformer2DModel(
num_attention_heads=lowerCAmelCase, attention_head_dim=lowerCAmelCase, in_channels=lowerCAmelCase, num_layers=lowerCAmelCase, dropout=lowerCAmelCase, norm_num_groups=lowerCAmelCase, cross_attention_dim=lowerCAmelCase, attention_bias=lowerCAmelCase, sample_size=lowerCAmelCase, num_vector_embeds=lowerCAmelCase, activation_fn=lowerCAmelCase, num_embeds_ada_norm=lowerCAmelCase, )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
lowerCamelCase_ =0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
lowerCamelCase_ =[77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
lowerCamelCase_ =[1, 0]
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase = True, ):
"""simple docstring"""
lowerCamelCase_ =hidden_states
lowerCamelCase_ =[]
lowerCamelCase_ =0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
lowerCamelCase_ =encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
lowerCamelCase_ =self.transformer_index_for_condition[i]
lowerCamelCase_ =self.transformers[transformer_index](
lowerCAmelCase, encoder_hidden_states=lowerCAmelCase, timestep=lowerCAmelCase, cross_attention_kwargs=lowerCAmelCase, return_dict=lowerCAmelCase, )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
lowerCamelCase_ =encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
lowerCamelCase_ =output_states + input_states
if not return_dict:
return (output_states,)
        return Transformer2DModelOutput(sample=lowerCAmelCase )
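# Note on the blend above: with mix_ratio = 0.5 the two transformers' residual outputs are
# averaged; pipelines in the VersatileDiffusion style can vary mix_ratio per denoising step
# to interpolate between the two conditioning branches.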
| 6 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
    lowercase : Union[str, Any] =ShapEImg2ImgPipeline
lowercase : Dict =['image']
lowercase : str =['image']
lowercase : int =[
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
lowercase : int =False
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase__ ( self ):
"""simple docstring"""
return 8
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
lowerCamelCase_ =CLIPVisionModel(lowerCAmelCase )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =CLIPImageProcessor(
crop_size=224, do_center_crop=lowerCAmelCase, do_normalize=lowerCAmelCase, do_resize=lowerCAmelCase, image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], resample=3, size=224, )
return image_processor
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowerCamelCase_ =PriorTransformer(**lowerCAmelCase )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowerCamelCase_ =ShapERenderer(**lowerCAmelCase )
return model
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.dummy_prior
lowerCamelCase_ =self.dummy_image_encoder
lowerCamelCase_ =self.dummy_image_processor
lowerCamelCase_ =self.dummy_renderer
lowerCamelCase_ =HeunDiscreteScheduler(
beta_schedule='''exp''', num_train_timesteps=1_024, prediction_type='''sample''', use_karras_sigmas=lowerCAmelCase, clip_sample=lowerCAmelCase, clip_sample_range=1.0, )
lowerCamelCase_ ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ):
"""simple docstring"""
lowerCamelCase_ =floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
if str(lowerCAmelCase ).startswith('''mps''' ):
lowerCamelCase_ =torch.manual_seed(lowerCAmelCase )
else:
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
lowerCamelCase_ ={
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu'''
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =pipe(**self.get_dummy_inputs(lowerCAmelCase ) )
lowerCamelCase_ =output.images[0]
lowerCamelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowerCamelCase_ =np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =torch_device == '''cpu'''
lowerCamelCase_ =True
self._test_inference_batch_single_identical(
batch_size=2, test_max_difference=lowerCAmelCase, relax_max_difference=lowerCAmelCase, )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =1
lowerCamelCase_ =2
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
lowerCamelCase_ =batch_size * [inputs[key]]
lowerCamelCase_ =pipe(**lowerCAmelCase, num_images_per_prompt=lowerCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowerCamelCase_ =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowerCamelCase_ =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
lowerCamelCase_ =pipe(
lowerCAmelCase, generator=lowerCAmelCase, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type='''np''', ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCAmelCase, lowerCAmelCase )
| 6 | 1 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def a_ ( __snake_case : float , __snake_case : float , __snake_case : int ) -> float:
"""simple docstring"""
lowerCamelCase_ =x
lowerCamelCase_ =y
for step in range(__snake_case ): # noqa: B007
lowerCamelCase_ =a * a - b * b + x
lowerCamelCase_ =2 * a * b + y
lowerCamelCase_ =a_new
        # divergence is guaranteed once the absolute value of z exceeds 2,
        # i.e. once a * a + b * b > 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
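# Illustrative sketch (not part of the original file): the escape-time
# recurrence above rewritten with descriptive names, to make the update
# order explicit -- the new imaginary part must use the *old* real part.
def _escape_time_sketch(x: float, y: float, max_step: int) -> float:
    a, b = x, y
    step = 0
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x  # real part of z**2 + c
        b = 2 * a * b + y  # imaginary part, using the old value of a
        a = a_new
        if a * a + b * b > 4:  # |z| > 2 guarantees divergence
            break
    return step / (max_step - 1)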
def a_ ( __snake_case : float ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def a_ ( __snake_case : float ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(__snake_case , 1 , 1 ) )
def a_ ( __snake_case : int = 800 , __snake_case : int = 600 , __snake_case : float = -0.6 , __snake_case : float = 0 , __snake_case : float = 3.2 , __snake_case : int = 50 , __snake_case : bool = True , ) -> Image.Image:
"""simple docstring"""
lowerCamelCase_ =Image.new('''RGB''' , (image_width, image_height) )
lowerCamelCase_ =img.load()
# loop through the image-coordinates
for image_x in range(__snake_case ):
for image_y in range(__snake_case ):
# determine the figure-coordinates based on the image-coordinates
lowerCamelCase_ =figure_width / image_width * image_height
lowerCamelCase_ =figure_center_x + (image_x / image_width - 0.5) * figure_width
lowerCamelCase_ =figure_center_y + (image_y / image_height - 0.5) * figure_height
lowerCamelCase_ =get_distance(__snake_case , __snake_case , __snake_case )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
lowerCamelCase_ =get_color_coded_rgb(__snake_case )
else:
lowerCamelCase_ =get_black_and_white_rgb(__snake_case )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
a_ : Dict = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 6 |
'''simple docstring'''
from itertools import product
def a_ ( __snake_case : int , __snake_case : int ) -> list[int]:
"""simple docstring"""
lowerCamelCase_ =sides_number
lowerCamelCase_ =max_face_number * dice_number
lowerCamelCase_ =[0] * (max_total + 1)
lowerCamelCase_ =1
lowerCamelCase_ =range(__snake_case , max_face_number + 1 )
for dice_numbers in product(__snake_case , repeat=__snake_case ):
lowerCamelCase_ =sum(__snake_case )
totals_frequencies[total] += 1
return totals_frequencies
def a_ ( ) -> float:
"""simple docstring"""
lowerCamelCase_ =total_frequency_distribution(
sides_number=4 , dice_number=9 )
lowerCamelCase_ =total_frequency_distribution(
sides_number=6 , dice_number=6 )
lowerCamelCase_ =0
lowerCamelCase_ =9
lowerCamelCase_ =4 * 9
lowerCamelCase_ =6
for peter_total in range(__snake_case , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
lowerCamelCase_ =(4**9) * (6**6)
lowerCamelCase_ =peter_wins_count / total_games_number
lowerCamelCase_ =round(__snake_case , ndigits=7 )
return rounded_peter_win_probability
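# Illustrative sketch (not part of the original solution): a Monte Carlo
# cross-check of the exact convolution above. With enough trials the
# estimate should approach the exact value of ~0.5731441.
def _monte_carlo_cross_check(trials: int = 100_000) -> float:
    import random

    wins = 0
    for _ in range(trials):
        peter_total = sum(random.randint(1, 4) for _ in range(9))
        colin_total = sum(random.randint(1, 6) for _ in range(6))
        wins += peter_total > colin_total
    return wins / trials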
if __name__ == "__main__":
print(F"""{solution() = }""")
| 6 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
a_ : Optional[Any] = logging.get_logger(__name__)
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
warnings.warn(
'''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use LayoutLMv2ImageProcessor instead.''', lowerCAmelCase, )
super().__init__(*lowerCAmelCase, **lowerCAmelCase )
| 6 |
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
a_ : Tuple = TypeVar("""T""")
a_ : Dict = Union[List[T], Tuple[T, ...]]
a_ : int = Union[T, List[T], Dict[str, T]]
a_ : Optional[Any] = Union[str, bytes, os.PathLike]
| 6 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''ZinengTang/tvlt-base'''
lowerCamelCase_ =tempfile.mkdtemp()
def lowercase__ ( self, **lowerCAmelCase ):
"""simple docstring"""
return TvltImageProcessor.from_pretrained(self.checkpoint, **lowerCAmelCase )
def lowercase__ ( self, **lowerCAmelCase ):
"""simple docstring"""
return TvltFeatureExtractor.from_pretrained(self.checkpoint, **lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =TvltProcessor(image_processor=lowerCAmelCase, feature_extractor=lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ =TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor, lowerCAmelCase )
self.assertIsInstance(processor.image_processor, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =TvltProcessor(image_processor=lowerCAmelCase, feature_extractor=lowerCAmelCase )
lowerCamelCase_ =np.ones([12_000] )
lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' )
lowerCamelCase_ =processor(audio=lowerCAmelCase, return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =TvltProcessor(image_processor=lowerCAmelCase, feature_extractor=lowerCAmelCase )
lowerCamelCase_ =np.ones([3, 224, 224] )
lowerCamelCase_ =image_processor(lowerCAmelCase, return_tensors='''np''' )
lowerCamelCase_ =processor(images=lowerCAmelCase, return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =TvltProcessor(image_processor=lowerCAmelCase, feature_extractor=lowerCAmelCase )
lowerCamelCase_ =np.ones([12_000] )
lowerCamelCase_ =np.ones([3, 224, 224] )
lowerCamelCase_ =processor(audio=lowerCAmelCase, images=lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ), ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase ):
processor()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_feature_extractor()
lowerCamelCase_ =TvltProcessor(image_processor=lowerCAmelCase, feature_extractor=lowerCAmelCase )
self.assertListEqual(
processor.model_input_names, image_processor.model_input_names + feature_extractor.model_input_names, msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''', )
| 6 |
'''simple docstring'''
import math
import random
from .hill_climbing import SearchProblem
def a_ ( __snake_case : SearchProblem , __snake_case : bool = True , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : bool = False , __snake_case : float = 100 , __snake_case : float = 0.0_1 , __snake_case : float = 1 , ) -> SearchProblem:
"""simple docstring"""
lowerCamelCase_ =False
lowerCamelCase_ =search_prob
lowerCamelCase_ =start_temperate
lowerCamelCase_ =[]
lowerCamelCase_ =0
lowerCamelCase_ =None
while not search_end:
lowerCamelCase_ =current_state.score()
if best_state is None or current_score > best_state.score():
lowerCamelCase_ =current_state
scores.append(__snake_case )
iterations += 1
lowerCamelCase_ =None
lowerCamelCase_ =current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
lowerCamelCase_ =random.randint(0 , len(__snake_case ) - 1 ) # picking a random neighbor
lowerCamelCase_ =neighbors.pop(__snake_case )
lowerCamelCase_ =picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
lowerCamelCase_ =change * -1 # in case we are finding minimum
if change > 0: # improves the solution
lowerCamelCase_ =picked_neighbor
else:
lowerCamelCase_ =(math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
lowerCamelCase_ =picked_neighbor
lowerCamelCase_ =current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
lowerCamelCase_ =True
else:
lowerCamelCase_ =next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(__snake_case ) , __snake_case )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
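# Illustrative sketch (not part of the original file): the Metropolis-style
# acceptance rule used above. A worsening move with score change `change`
# (< 0 after the find_max sign adjustment) is accepted with probability
# e**(change / T): e.g. change = -1 is accepted ~99% of the time at T = 100,
# but only ~13.5% of the time at T = 0.5.
def _acceptance_probability_sketch(change: float, temperature: float) -> float:
    return math.e ** (change / temperature)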
if __name__ == "__main__":
def a_ ( __snake_case : List[str] , __snake_case : Optional[int] ) -> str:
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ : Optional[int] = simulated_annealing(
prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ : List[str] = simulated_annealing(
prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def a_ ( __snake_case : Dict , __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return (3 * x**2) - (6 * y)
a_ : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ : Optional[Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
a_ : Dict = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ : Optional[int] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
| 6 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : Optional[Any] = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Tuple ='poolformer'
def __init__( self, lowerCAmelCase=3, lowerCAmelCase=16, lowerCAmelCase=16, lowerCAmelCase=3, lowerCAmelCase=4.0, lowerCAmelCase=[2, 2, 6, 2], lowerCAmelCase=[64, 128, 320, 512], lowerCAmelCase=[7, 3, 3, 3], lowerCAmelCase=[4, 2, 2, 2], lowerCAmelCase=[2, 1, 1, 1], lowerCAmelCase=4, lowerCAmelCase=0.0, lowerCAmelCase="gelu", lowerCAmelCase=True, lowerCAmelCase=1e-5, lowerCAmelCase=0.0_2, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =num_channels
lowerCamelCase_ =patch_size
lowerCamelCase_ =stride
lowerCamelCase_ =padding
lowerCamelCase_ =pool_size
lowerCamelCase_ =hidden_sizes
lowerCamelCase_ =mlp_ratio
lowerCamelCase_ =depths
lowerCamelCase_ =patch_sizes
lowerCamelCase_ =strides
lowerCamelCase_ =num_encoder_blocks
lowerCamelCase_ =drop_path_rate
lowerCamelCase_ =hidden_act
lowerCamelCase_ =use_layer_scale
lowerCamelCase_ =layer_scale_init_value
lowerCamelCase_ =initializer_range
super().__init__(**lowerCAmelCase )
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : str =version.parse('1.11' )
@property
def lowercase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowercase__ ( self ):
"""simple docstring"""
return 2e-3
| 6 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def a_ ( __snake_case : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =[
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__snake_case , __snake_case )
def a_ ( __snake_case : List[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =emb.weight.shape
lowerCamelCase_ =nn.Linear(__snake_case , __snake_case , bias=__snake_case )
lowerCamelCase_ =emb.weight.data
return lin_layer
def a_ ( __snake_case : Union[str, Any] , __snake_case : Tuple=None ) -> Dict:
"""simple docstring"""
lowerCamelCase_ ={}
for old_key in state_dict.keys():
lowerCamelCase_ =old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
lowerCamelCase_ =key.replace('''moe_layer.experts.0''' , F'''ffn.experts.expert_{expert_idx}''' )
else:
lowerCamelCase_ =key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
if "gate" in key:
lowerCamelCase_ =key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
if "fc2" and "experts" not in key:
lowerCamelCase_ =key.replace('''.fc2.''' , '''.ffn.fc2.''' )
if "fc1" and "experts" not in key:
lowerCamelCase_ =key.replace('''.fc1.''' , '''.ffn.fc1.''' )
if ".encoder_attn." in key:
lowerCamelCase_ =key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
if "encoder_attn_layer_norm" in key:
lowerCamelCase_ =key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
if "final_layer_norm" in key:
lowerCamelCase_ =key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
lowerCamelCase_ =state_dict[old_key]
return new_dict
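# Illustrative summary (not part of the original script) of the fairseq -> HF
# key renames performed above:
#   moe_layer.experts.<i>    -> ffn.experts.expert_<i>
#   .moe_layer.gate.wg       -> .ffn.router.classifier
#   .fc1. / .fc2.            -> .ffn.fc1. / .ffn.fc2.   (non-expert weights only)
#   .encoder_attn.           -> .cross_attention.
#   encoder_attn_layer_norm  -> cross_attention_layer_norm
#   final_layer_norm         -> ff_layer_norm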
def a_ ( __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : str = WEIGHTS_NAME ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =0
os.makedirs(__snake_case , exist_ok=__snake_case )
for expert in range(__snake_case ):
lowerCamelCase_ =switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(__snake_case ):
lowerCamelCase_ =torch.load(__snake_case )['''model''']
remove_ignore_keys_(__snake_case )
lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case )
lowerCamelCase_ =os.path.join(
__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) )
torch.save(__snake_case , __snake_case )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__snake_case )[0]].dtype )
# Add the last block
lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) )
lowerCamelCase_ =torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
remove_ignore_keys_(__snake_case )
lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case )
lowerCamelCase_ =shared_weights['''decoder.embed_tokens.weight''']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__snake_case ) == 1:
lowerCamelCase_ =os.path.join(__snake_case , __snake_case )
torch.save(__snake_case , __snake_case )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__snake_case , __snake_case )
# Otherwise, let's build the index
lowerCamelCase_ ={}
for idx, shard in enumerate(__snake_case ):
lowerCamelCase_ =weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-{len(__snake_case ):05d}.bin''' )
lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) )
for key in shard:
lowerCamelCase_ =shard_file
# Add the metadata
lowerCamelCase_ ={'''total_size''': total_size}
lowerCamelCase_ ={'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(__snake_case , __snake_case ) , '''w''' , encoding='''utf-8''' ) as f:
lowerCamelCase_ =json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + '''\n'''
f.write(__snake_case )
return metadata, index
if __name__ == "__main__":
a_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
a_ : Tuple = parser.parse_args()
a_ , a_ : int = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_28,
args.dtype,
)
a_ : Tuple = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
)
config.save_pretrained(args.pytorch_dump_folder_path)
a_ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
| 6 | 1 |
'''simple docstring'''
from math import ceil
def a_ ( __snake_case : int = 1001 ) -> int:
"""simple docstring"""
lowerCamelCase_ =1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
lowerCamelCase_ =2 * i + 1
lowerCamelCase_ =2 * i
lowerCamelCase_ =total + 4 * odd**2 - 6 * even
return total
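# Illustrative note (not part of the original solution): ring i of the spiral
# has side length 2*i + 1, and its four corner values sum to
# 4 * (2*i + 1)**2 - 6 * (2*i), which is exactly the per-ring term above.
# For n = 5 the diagonal sum is 1 + 3 + 5 + 7 + 9 + 13 + 17 + 21 + 25 = 101.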
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
a_ : Any = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 6 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : int =['image_processor', 'tokenizer']
lowercase : int ='LayoutLMv2ImageProcessor'
lowercase : Any =('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', lowerCAmelCase, )
lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase, lowerCAmelCase )
def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = True, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=lowerCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =[text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCamelCase_ =features['''words''']
lowerCamelCase_ =self.tokenizer(
text=text if text is not None else features['''words'''], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['''boxes'''], word_labels=lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, stride=lowerCAmelCase, pad_to_multiple_of=lowerCAmelCase, return_token_type_ids=lowerCAmelCase, return_attention_mask=lowerCAmelCase, return_overflowing_tokens=lowerCAmelCase, return_special_tokens_mask=lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, return_length=lowerCAmelCase, verbose=lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase, )
# add pixel values
lowerCamelCase_ =features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
lowerCamelCase_ =self.get_overflowing_images(lowerCAmelCase, encoded_inputs['''overflow_to_sample_mapping'''] )
lowerCamelCase_ =images
return encoded_inputs
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[]
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(lowerCAmelCase )} and {len(lowerCAmelCase )}''' )
return images_with_overflow
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, )
return self.image_processor_class
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, )
return self.image_processor
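# Illustrative usage sketch (not part of the original file); the checkpoint
# name is an assumption:
#
#   from transformers import LayoutXLMProcessor
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(image, return_tensors="pt")
#   # With apply_ocr=True (the default) the image processor runs OCR to
#   # produce words + boxes, so `encoding` holds input_ids, bbox,
#   # attention_mask and image tensors.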
| 6 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ : List[str] = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
a_ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 6 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : Union[str, Any] =VQModel
lowercase : Union[str, Any] ='sample'
@property
def lowercase__ ( self, lowerCAmelCase=(32, 32) ):
"""simple docstring"""
lowerCamelCase_ =4
lowerCamelCase_ =3
lowerCamelCase_ =floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase )
return {"sample": image}
@property
def lowercase__ ( self ):
"""simple docstring"""
return (3, 32, 32)
@property
def lowercase__ ( self ):
"""simple docstring"""
return (3, 32, 32)
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ={
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 3,
}
lowerCamelCase_ =self.dummy_input
return init_dict, inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''', output_loading_info=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ), 0 )
model.to(lowerCAmelCase )
lowerCamelCase_ =model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''' )
model.to(lowerCAmelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
lowerCamelCase_ =torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size )
lowerCamelCase_ =image.to(lowerCAmelCase )
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase ).sample
lowerCamelCase_ =output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowerCamelCase_ =torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3] )
# fmt: on
self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) )
| 6 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : str = logging.get_logger(__name__)
a_ : Optional[Any] = {
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Tuple ='levit'
def __init__( self, lowerCAmelCase=224, lowerCAmelCase=3, lowerCAmelCase=3, lowerCAmelCase=2, lowerCAmelCase=1, lowerCAmelCase=16, lowerCAmelCase=[128, 256, 384], lowerCAmelCase=[4, 8, 12], lowerCAmelCase=[4, 4, 4], lowerCAmelCase=[16, 16, 16], lowerCAmelCase=0, lowerCAmelCase=[2, 2, 2], lowerCAmelCase=[2, 2, 2], lowerCAmelCase=0.0_2, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
lowerCamelCase_ =image_size
lowerCamelCase_ =num_channels
lowerCamelCase_ =kernel_size
lowerCamelCase_ =stride
lowerCamelCase_ =padding
lowerCamelCase_ =hidden_sizes
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =depths
lowerCamelCase_ =key_dim
lowerCamelCase_ =drop_path_rate
lowerCamelCase_ =patch_size
lowerCamelCase_ =attention_ratio
lowerCamelCase_ =mlp_ratio
lowerCamelCase_ =initializer_range
lowerCamelCase_ =[
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[str] =version.parse('1.11' )
@property
def lowercase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowercase__ ( self ):
"""simple docstring"""
return 1e-4
| 6 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
a_ : List[Any] = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
a_ : List[Any] = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
a_ : Any = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def lowercase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ), codebase_urls=['''https://www.atticusprojectai.org/cuad'''], reference_urls=['''https://www.atticusprojectai.org/cuad'''], )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ ={prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
lowerCamelCase_ =[
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
lowerCamelCase_ =evaluate(dataset=lowerCAmelCase, predictions=lowerCAmelCase )
return score
| 6 | 1 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def a_ ( __snake_case : Callable , __snake_case : float , __snake_case : float , __snake_case : float , __snake_case : float ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ =int(np.ceil((x_end - xa) / step_size ) )
lowerCamelCase_ =np.zeros((n + 1,) )
lowerCamelCase_ =ya
lowerCamelCase_ =xa
for k in range(__snake_case ):
lowerCamelCase_ =y[k] + step_size * ode_func(__snake_case , y[k] )
lowerCamelCase_ =y[k] + (
(step_size / 2) * (ode_func(__snake_case , y[k] ) + ode_func(x + step_size , __snake_case ))
)
x += step_size
return y
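# Illustrative sketch (not part of the original file): a single step of
# Heun's predictor-corrector scheme with descriptive names. For y' = y,
# y(0) = 1 and step h it returns 1 + h + h**2 / 2, the second-order Taylor
# expansion of e**h.
def _heun_single_step_sketch(ode_func: Callable, x: float, y: float, h: float) -> float:
    y_predict = y + h * ode_func(x, y)  # Euler (forward) predictor
    return y + (h / 2) * (ode_func(x, y) + ode_func(x + h, y_predict))  # trapezoidal corrector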
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
a_ : Tuple = logging.get_logger(__name__)
a_ : int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a_ : Tuple = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
a_ : Union[str, Any] = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
a_ : str = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
a_ : int = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_12,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_12,
}
a_ : List[Any] = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_12,
"""facebook/dpr-question_encoder-multiset-base""": 5_12,
}
a_ : Optional[Any] = {
"""facebook/dpr-reader-single-nq-base""": 5_12,
"""facebook/dpr-reader-multiset-base""": 5_12,
}
a_ : Optional[int] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
a_ : List[str] = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
a_ : Dict = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[Any] =VOCAB_FILES_NAMES
lowercase : Any =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[Any] =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase : Dict =DPRContextEncoderTokenizer
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[int] =VOCAB_FILES_NAMES
lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase : List[Any] =DPRQuestionEncoderTokenizer
a_ : Union[str, Any] = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
a_ : Dict = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
a_ : Dict = R"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output a batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(lowerCamelCase__ )
class __UpperCamelCase :
def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
elif titles is None or texts is None:
lowerCamelCase_ =titles if texts is None else texts
return super().__call__(
lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
lowerCamelCase_ =titles if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [titles]
lowerCamelCase_ =texts if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [texts]
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =questions if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [questions] * n_passages
assert len(lowerCAmelCase ) == len(
        lowerCAmelCase ), f'''There should be as many titles as texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.'''
lowerCamelCase_ =super().__call__(lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
lowerCamelCase_ =super().__call__(lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
lowerCamelCase_ ={
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCAmelCase, lowerCAmelCase )
]
}
if return_attention_mask is not False:
lowerCamelCase_ =[]
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCamelCase_ =attention_mask
return self.pad(lowerCAmelCase, padding=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 16, lowerCAmelCase = 64, lowerCAmelCase = 4, ):
"""simple docstring"""
lowerCamelCase_ =reader_input['''input_ids''']
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =reader_output[:3]
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =sorted(range(lowerCAmelCase ), reverse=lowerCAmelCase, key=relevance_logits.__getitem__ )
lowerCamelCase_ =[]
for doc_id in sorted_docs:
lowerCamelCase_ =list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCamelCase_ =sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCamelCase_ =sequence_ids.index(self.pad_token_id )
else:
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCAmelCase, top_spans=lowerCAmelCase, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCAmelCase, start_index=lowerCAmelCase, end_index=lowerCAmelCase, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) )
if len(lowerCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =[]
for start_index, start_score in enumerate(lowerCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowerCamelCase_ =sorted(lowerCAmelCase, key=lambda lowerCAmelCase : x[1], reverse=lowerCAmelCase )
lowerCamelCase_ =[]
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
lowerCamelCase_ =end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
lowercase : int =VOCAB_FILES_NAMES
lowercase : Tuple =READER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Tuple =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] =READER_PRETRAINED_INIT_CONFIGURATION
lowercase : int =['input_ids', 'attention_mask']
lowercase : Dict =DPRReaderTokenizer
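# Illustrative note (not part of the original file): `decode_best_spans`
# ranks passages by relevance_logits, then `_get_best_spans` scores each
# candidate span as start_logits[start] + end_logits[end], drops spans
# longer than max_answer_length, and skips spans that overlap an
# already-chosen one.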
| 6 | 1 |
'''simple docstring'''
def a_ ( __snake_case : int , __snake_case : list[int] , __snake_case : int ) -> int:
"""simple docstring"""
def count_of_possible_combinations(__snake_case : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(__snake_case )
def a_ ( __snake_case : int , __snake_case : list[int] , __snake_case : int ) -> int:
"""simple docstring"""
def count_of_possible_combinations_with_dp_array(
__snake_case : int , __snake_case : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowerCamelCase_ =sum(
count_of_possible_combinations_with_dp_array(target - item , __snake_case )
for item in array )
lowerCamelCase_ =answer
return answer
lowerCamelCase_ =[-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(__snake_case , __snake_case )
def a_ ( __snake_case : int , __snake_case : list[int] , __snake_case : int ) -> int:
"""simple docstring"""
lowerCamelCase_ =[0] * (target + 1)
lowerCamelCase_ =1
for i in range(1 , target + 1 ):
for j in range(__snake_case ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
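# Illustrative note (not part of the original file): the bottom-up table
# above implements dp[i] = sum(dp[i - a] for a in array if a <= i), which
# counts *ordered* sequences. For array = [1, 2, 5] and target = 5 the
# answer is 9 (e.g. 1+1+1+2 and 2+1+1+1 count separately).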
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ : List[str] = 3
a_ : str = 5
a_ : Union[str, Any] = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 6 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def a_ ( ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ ={
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
}
lowerCamelCase_ =Dataset.from_dict(__snake_case )
return dataset
class __UpperCamelCase ( lowerCamelCase__ ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =get_dataset()
lowerCamelCase_ =make_duplicate_clusters(lowerCAmelCase, 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ), 2 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =get_dataset()
lowerCamelCase_, lowerCamelCase_ =deduplicate_dataset(lowerCAmelCase )
self.assertEqual(len(lowerCAmelCase ), 2 )
print(lowerCAmelCase )
self.assertEqual(duplicate_clusters[0][0]['''copies'''], 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''], lowerCAmelCase )
| 6 | 1 |
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[Any] =(DDIMParallelScheduler,)
lowercase : List[Any] =(('eta', 0.0), ('num_inference_steps', 50))
def lowercase__ ( self, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ ={
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**lowerCAmelCase )
return config
def lowercase__ ( self, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.scheduler_classes[0]
lowerCamelCase_ =self.get_scheduler_config(**lowerCAmelCase )
lowerCamelCase_ =scheduler_class(**lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =10, 0.0
lowerCamelCase_ =self.dummy_model()
lowerCamelCase_ =self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase )
for t in scheduler.timesteps:
lowerCamelCase_ =model(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =scheduler.step(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ).prev_sample
return sample
def lowercase__ ( self ):
"""simple docstring"""
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase )
lowerCamelCase_ =self.scheduler_classes[0]
lowerCamelCase_ =self.get_scheduler_config(steps_offset=1 )
lowerCamelCase_ =scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1] ) )
def lowercase__ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1], [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase, beta_end=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase, prediction_type=lowerCAmelCase, sample_max_value=lowerCAmelCase, )
def lowercase__ ( self ):
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500] ):
self.check_over_forward(time_step=lowerCAmelCase, num_inference_steps=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=lowerCAmelCase, eta=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.scheduler_classes[0]
lowerCamelCase_ =self.get_scheduler_config()
lowerCamelCase_ =scheduler_class(**lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0, 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420, 400 ) - 0.1_4_7_7_1 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980, 960 ) - 0.3_2_4_6_0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0, 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487, 486 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999, 998 ) - 0.0_2 ) ) < 1e-5
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.scheduler_classes[0]
lowerCamelCase_ =self.get_scheduler_config()
lowerCamelCase_ =scheduler_class(**lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =10, 0.0
scheduler.set_timesteps(lowerCAmelCase )
lowerCamelCase_ =self.dummy_model()
lowerCamelCase_ =self.dummy_sample_deter
lowerCamelCase_ =self.dummy_sample_deter + 0.1
lowerCamelCase_ =self.dummy_sample_deter - 0.1
lowerCamelCase_ =samplea.shape[0]
lowerCamelCase_ =torch.stack([samplea, samplea, samplea], dim=0 )
lowerCamelCase_ =torch.arange(lowerCAmelCase )[0:3, None].repeat(1, lowerCAmelCase )
lowerCamelCase_ =model(samples.flatten(0, 1 ), timesteps.flatten(0, 1 ) )
lowerCamelCase_ =scheduler.batch_step_no_noise(lowerCAmelCase, timesteps.flatten(0, 1 ), samples.flatten(0, 1 ), lowerCAmelCase )
lowerCamelCase_ =torch.sum(torch.abs(lowerCAmelCase ) )
lowerCamelCase_ =torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 1_1_4_7.7_9_0_4 ) < 1e-2
assert abs(result_mean.item() - 0.4_9_8_2 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.full_loop()
lowerCamelCase_ =torch.sum(torch.abs(lowerCAmelCase ) )
lowerCamelCase_ =torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 1_7_2.0_0_6_7 ) < 1e-2
assert abs(result_mean.item() - 0.2_2_3_9_6_7 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.full_loop(prediction_type='''v_prediction''' )
lowerCamelCase_ =torch.sum(torch.abs(lowerCAmelCase ) )
lowerCamelCase_ =torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 5_2.5_3_0_2 ) < 1e-2
assert abs(result_mean.item() - 0.0_6_8_4 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.full_loop(set_alpha_to_one=lowerCAmelCase, beta_start=0.0_1 )
lowerCamelCase_ =torch.sum(torch.abs(lowerCAmelCase ) )
lowerCamelCase_ =torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 1_4_9.8_2_9_5 ) < 1e-2
assert abs(result_mean.item() - 0.1_9_5_1 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.full_loop(set_alpha_to_one=lowerCAmelCase, beta_start=0.0_1 )
lowerCamelCase_ =torch.sum(torch.abs(lowerCAmelCase ) )
lowerCamelCase_ =torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 1_4_9.0_7_8_4 ) < 1e-2
assert abs(result_mean.item() - 0.1_9_4_1 ) < 1e-3
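# A numpy sketch (not the scheduler's own code) of the DDIM variance the asserts
# above exercise, assuming the default linear beta schedule:
#   var(t, t_prev) = (1 - abar[t_prev]) / (1 - abar[t]) * (1 - abar[t] / abar[t_prev])
import numpy as np

_betas = np.linspace(0.0001, 0.02, 1000)
_abar = np.cumprod(1.0 - _betas)

def _ddim_variance_sketch(t, t_prev):
    return (1 - _abar[t_prev]) / (1 - _abar[t]) * (1 - _abar[t] / _abar[t_prev])

print(_ddim_variance_sketch(420, 400))  # should land near the 0.14771 asserted above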
| 6 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
a_ : Any = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
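# A minimal sketch of the lazy-import idea behind _LazyModule, written with a
# module-level __getattr__ (PEP 562) as it would sit in a package __init__.py;
# the real class additionally handles __dir__, extra objects, and module specs.
import importlib
_lazy_structure = {"processing_trocr": ["TrOCRProcessor"]}

def __getattr__(name):
    for _submodule, _exported in _lazy_structure.items():
        if name in _exported:
            return getattr(importlib.import_module(f".{_submodule}", __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")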
| 6 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
a_ : Union[str, Any] = logging.get_logger("""transformers.models.encodec""")
a_ : Dict = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
a_ : Dict = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
a_ : Any = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
a_ : Any = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
a_ : int = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
a_ : Dict = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
a_ : str = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
a_ : List[Any] = []
a_ : Any = []
def a_ ( __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : List[str] ) -> Dict:
"""simple docstring"""
for attribute in key.split('''.''' ):
lowerCamelCase_ =getattr(__snake_case , __snake_case )
if weight_type is not None:
lowerCamelCase_ =getattr(__snake_case , __snake_case ).shape
else:
lowerCamelCase_ =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowerCamelCase_ =value
elif weight_type == "weight_g":
lowerCamelCase_ =value
elif weight_type == "weight_v":
lowerCamelCase_ =value
elif weight_type == "bias":
lowerCamelCase_ =value
elif weight_type == "running_mean":
lowerCamelCase_ =value
elif weight_type == "running_var":
lowerCamelCase_ =value
elif weight_type == "num_batches_tracked":
lowerCamelCase_ =value
elif weight_type == "weight_ih_l0":
lowerCamelCase_ =value
elif weight_type == "weight_hh_l0":
lowerCamelCase_ =value
elif weight_type == "bias_ih_l0":
lowerCamelCase_ =value
elif weight_type == "bias_hh_l0":
lowerCamelCase_ =value
elif weight_type == "weight_ih_l1":
lowerCamelCase_ =value
elif weight_type == "weight_hh_l1":
lowerCamelCase_ =value
elif weight_type == "bias_ih_l1":
lowerCamelCase_ =value
elif weight_type == "bias_hh_l1":
lowerCamelCase_ =value
else:
lowerCamelCase_ =value
logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def a_ ( __snake_case : str , __snake_case : str ) -> Tuple:
"""simple docstring"""
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowerCamelCase_, lowerCamelCase_ =key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ =[]
    if model_name in ("encodec_24khz", "encodec_32khz"):  # fixed: `== ... or "encodec_32khz"` was always truthy
lowerCamelCase_ =MAPPING_24K
elif model_name == "encodec_48khz":
lowerCamelCase_ =MAPPING_48K
else:
raise ValueError(F'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(__snake_case , __snake_case ):
logger.info(F'''{name} was ignored''' )
continue
lowerCamelCase_ =False
for key, mapped_key in MAPPING.items():
if "*" in key:
lowerCamelCase_, lowerCamelCase_ =key.split('''.*.''' )
if prefix in name and suffix in name:
lowerCamelCase_ =suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
continue
lowerCamelCase_ =True
if "*" in mapped_key:
lowerCamelCase_ =name.split(__snake_case )[0].split('''.''' )[-2]
lowerCamelCase_ =mapped_key.replace('''*''' , __snake_case )
if "weight_g" in name:
lowerCamelCase_ ='''weight_g'''
elif "weight_v" in name:
lowerCamelCase_ ='''weight_v'''
elif "weight_ih_l0" in name:
lowerCamelCase_ ='''weight_ih_l0'''
elif "weight_hh_l0" in name:
lowerCamelCase_ ='''weight_hh_l0'''
elif "bias_ih_l0" in name:
lowerCamelCase_ ='''bias_ih_l0'''
elif "bias_hh_l0" in name:
lowerCamelCase_ ='''bias_hh_l0'''
elif "weight_ih_l1" in name:
lowerCamelCase_ ='''weight_ih_l1'''
elif "weight_hh_l1" in name:
lowerCamelCase_ ='''weight_hh_l1'''
elif "bias_ih_l1" in name:
lowerCamelCase_ ='''bias_ih_l1'''
elif "bias_hh_l1" in name:
lowerCamelCase_ ='''bias_hh_l1'''
elif "bias" in name:
lowerCamelCase_ ='''bias'''
elif "weight" in name:
lowerCamelCase_ ='''weight'''
elif "running_mean" in name:
lowerCamelCase_ ='''running_mean'''
elif "running_var" in name:
lowerCamelCase_ ='''running_var'''
elif "num_batches_tracked" in name:
lowerCamelCase_ ='''num_batches_tracked'''
else:
lowerCamelCase_ =None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def a_ ( __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Tuple=None , __snake_case : List[str]=None , ) -> Dict:
"""simple docstring"""
if config_path is not None:
lowerCamelCase_ =EncodecConfig.from_pretrained(__snake_case )
else:
lowerCamelCase_ =EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
lowerCamelCase_ =[8, 5, 4, 4]
lowerCamelCase_ =[2.2]
lowerCamelCase_ =64
lowerCamelCase_ =3_2000
lowerCamelCase_ =2048
lowerCamelCase_ =False
lowerCamelCase_ =False
lowerCamelCase_ =False
elif model_name == "encodec_48khz":
lowerCamelCase_ =[8, 5, 4, 2]
lowerCamelCase_ =[3.0, 6.0, 1_2.0, 2_4.0]
lowerCamelCase_ =4_8000
lowerCamelCase_ =2
lowerCamelCase_ =False
lowerCamelCase_ ='''time_group_norm'''
lowerCamelCase_ =True
lowerCamelCase_ =1.0
lowerCamelCase_ =0.0_1
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
lowerCamelCase_ =EncodecModel(__snake_case )
lowerCamelCase_ =EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(__snake_case )
lowerCamelCase_ =torch.load(__snake_case )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
lowerCamelCase_ =original_checkpoint['''best_state''']
recursively_load_weights(__snake_case , __snake_case , __snake_case )
model.save_pretrained(__snake_case )
if repo_id:
print('''Pushing to the hub...''' )
feature_extractor.push_to_hub(__snake_case )
model.push_to_hub(__snake_case )
if __name__ == "__main__":
a_ : str = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
a_ : Any = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
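# Simplified illustration (hypothetical helper, not part of this script) of how
# the '.*.' wildcard keys in MAPPING are matched and expanded to a layer index:
def _rename_key_sketch(name: str, mapping: dict) -> str:
    for key, mapped in mapping.items():
        if "*" in key:
            prefix, suffix = key.split(".*.")
            if name.startswith(prefix + ".") and name.endswith("." + suffix):
                layer = name[len(prefix) + 1 : -(len(suffix) + 1)]
                return mapped.replace("*", layer)
        elif key in name:
            return mapped
    return name

print(_rename_key_sketch(
    "quantizer.vq.layers.3._codebook.embed",
    {"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed"},
))  # -> quantizer.layers.3.codebook.embed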
| 6 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def a_ ( __snake_case : int = 150_0000 ) -> int:
"""simple docstring"""
lowerCamelCase_ =defaultdict(__snake_case )
lowerCamelCase_ =2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , __snake_case , 2 ):
if gcd(__snake_case , __snake_case ) > 1:
continue
lowerCamelCase_ =2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__snake_case , limit + 1 , __snake_case ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"""{solution() = }""")
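    # Spot-check of Euclid's formula used above (m=2, n=1 gives the 3-4-5 triangle);
    # the generated perimeter 2*m*(m + n) matches a + b + c.
    m, n = 2, 1
    a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
    assert (a, b, c) == (3, 4, 5) and a * a + b * b == c * c
    assert a + b + c == 2 * m * (m + n) == 12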
| 6 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class __UpperCamelCase :
def __init__( self, lowerCAmelCase=2, lowerCAmelCase=3, lowerCAmelCase=64, lowerCAmelCase=None ):
"""simple docstring"""
lowerCamelCase_ =np.random.default_rng(lowerCAmelCase )
lowerCamelCase_ =length
lowerCamelCase_ =rng.normal(size=(length,) ).astype(np.floataa )
lowerCamelCase_ =a * self.x + b + rng.normal(scale=0.1, size=(length,) ).astype(np.floataa )
def __len__( self ):
"""simple docstring"""
return self.length
def __getitem__( self, lowerCAmelCase ):
"""simple docstring"""
return {"x": self.x[i], "y": self.y[i]}
class __UpperCamelCase ( torch.nn.Module ):
def __init__( self, lowerCAmelCase=0, lowerCAmelCase=0, lowerCAmelCase=False ):
"""simple docstring"""
super().__init__()
lowerCamelCase_ =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCamelCase_ =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCamelCase_ =True
def lowercase__ ( self, lowerCAmelCase=None ):
"""simple docstring"""
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
lowerCamelCase_ =False
return x * self.a[0] + self.b[0]
class __UpperCamelCase ( torch.nn.Module ):
def __init__( self, lowerCAmelCase=0, lowerCAmelCase=0, lowerCAmelCase=False ):
"""simple docstring"""
super().__init__()
lowerCamelCase_ =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
lowerCamelCase_ =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
lowerCamelCase_ =True
def lowercase__ ( self, lowerCAmelCase=None ):
"""simple docstring"""
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
lowerCamelCase_ =False
return x * self.a + self.b
def a_ ( __snake_case : Dict , __snake_case : int = 16 ) -> Union[str, Any]:
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowerCamelCase_ ={'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
lowerCamelCase_ =load_dataset('''csv''' , data_files=__snake_case )
lowerCamelCase_ =datasets['''train'''].unique('''label''' )
lowerCamelCase_ ={v: i for i, v in enumerate(__snake_case )}
def tokenize_function(__snake_case : List[str] ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ =tokenizer(
examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case , padding='''max_length''' )
if "label" in examples:
lowerCamelCase_ =[label_to_id[l] for l in examples['''label''']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCamelCase_ =datasets.map(
__snake_case , batched=__snake_case , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
def collate_fn(__snake_case : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__snake_case , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(__snake_case , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
lowerCamelCase_ =DataLoader(tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=2 )
lowerCamelCase_ =DataLoader(tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=1 )
return train_dataloader, eval_dataloader
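# A self-contained usage sketch (illustrative only) showing how dict-style items
# like the regression dataset above are collated by DataLoader into 1-D tensors:
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    xs = rng.normal(size=(8,)).astype(np.float32)
    ys = (2 * xs + 3).astype(np.float32)
    loader = DataLoader([{"x": x, "y": y} for x, y in zip(xs, ys)], batch_size=4)
    for batch in loader:
        print(batch["x"].shape, batch["y"].shape)  # torch.Size([4]) twice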
| 6 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ : Tuple = 16
a_ : Optional[int] = 32
def a_ ( __snake_case : Accelerator , __snake_case : int = 16 ) -> str:
"""simple docstring"""
lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowerCamelCase_ =load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__snake_case : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ =datasets.map(
__snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ =tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__snake_case : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ =16
elif accelerator.mixed_precision != "no":
lowerCamelCase_ =8
else:
lowerCamelCase_ =None
return tokenizer.pad(
__snake_case , padding='''longest''' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowerCamelCase_ =DataLoader(
tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
lowerCamelCase_ =DataLoader(
tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a_ : Tuple = mocked_dataloaders # noqa: F811
def a_ ( __snake_case : List[str] , __snake_case : Tuple ) -> Optional[Any]:
"""simple docstring"""
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __snake_case ) == "1":
lowerCamelCase_ =2
# Initialize accelerator
lowerCamelCase_ =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ =config['''lr''']
lowerCamelCase_ =int(config['''num_epochs'''] )
lowerCamelCase_ =int(config['''seed'''] )
lowerCamelCase_ =int(config['''batch_size'''] )
lowerCamelCase_ =evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__snake_case )
def inner_training_loop(__snake_case : Union[str, Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ =model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ =AdamW(params=model.parameters() , lr=__snake_case )
lowerCamelCase_, lowerCamelCase_ =get_dataloaders(__snake_case , __snake_case )
# Instantiate scheduler
lowerCamelCase_ =get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Now we train the model
for epoch in range(__snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_ =model(**__snake_case )
lowerCamelCase_ =outputs.loss
accelerator.backward(__snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ =model(**__snake_case )
lowerCamelCase_ =outputs.logits.argmax(dim=-1 )
lowerCamelCase_, lowerCamelCase_ =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
lowerCamelCase_ =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __snake_case )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def a_ ( ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=__snake_case , default=__snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowerCamelCase_ =parser.parse_args()
lowerCamelCase_ ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
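# A minimal sketch of the retry-on-OOM idea behind find_executable_batch_size
# (illustrative only; the real decorator also clears accelerator/CUDA state):
import functools

def _find_executable_batch_size_sketch(starting_batch_size=128):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError as e:  # halve and retry on OOM-style failures
                    if "out of memory" not in str(e):
                        raise
                    batch_size //= 2
            raise RuntimeError("No executable batch size found")
        return wrapper
    return decorator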
| 6 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 6 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
a_ : List[str] = """src/diffusers"""
# Matches is_xxx_available()
a_ : int = re.compile(R"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
a_ : List[str] = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
a_ : Optional[Any] = """
{0} = None
"""
a_ : List[Any] = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
a_ : Optional[Any] = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def a_ ( __snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =_re_backend.findall(__snake_case )
if len(__snake_case ) == 0:
return None
return "_and_".join(__snake_case )
def a_ ( ) -> Optional[int]:
"""simple docstring"""
with open(os.path.join(__snake_case , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCamelCase_ =f.readlines()
# Get to the point we do the actual imports for type checking
lowerCamelCase_ =0
lowerCamelCase_ ={}
# Go through the end of the file
while line_index < len(__snake_case ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowerCamelCase_ =find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('''else:''' ):
line_index += 1
line_index += 1
lowerCamelCase_ =[]
# Until we unindent, add backend objects to the list
while line_index < len(__snake_case ) and len(lines[line_index] ) > 1:
lowerCamelCase_ =lines[line_index]
lowerCamelCase_ =_re_single_line_import.search(__snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__snake_case ) > 0:
lowerCamelCase_ =objects
else:
line_index += 1
return backend_specific_objects
def a_ ( __snake_case : Dict , __snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(__snake_case )
elif name.islower():
return DUMMY_FUNCTION.format(__snake_case , __snake_case )
else:
return DUMMY_CLASS.format(__snake_case , __snake_case )
def a_ ( __snake_case : Tuple=None ) -> List[str]:
"""simple docstring"""
if backend_specific_objects is None:
lowerCamelCase_ =read_init()
# For special correspondence backend to module name as used in the function requires_modulename
lowerCamelCase_ ={}
for backend, objects in backend_specific_objects.items():
lowerCamelCase_ ='''[''' + ''', '''.join(F'''"{b}"''' for b in backend.split('''_and_''' ) ) + ''']'''
lowerCamelCase_ ='''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__snake_case , __snake_case ) for o in objects] )
lowerCamelCase_ =dummy_file
return dummy_files
def a_ ( __snake_case : Dict=False ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
lowerCamelCase_ ={'''torch''': '''pt'''}
# Locate actual dummy modules and read their content.
lowerCamelCase_ =os.path.join(__snake_case , '''utils''' )
lowerCamelCase_ ={
backend: os.path.join(__snake_case , F'''dummy_{short_names.get(__snake_case , __snake_case )}_objects.py''' )
for backend in dummy_files.keys()
}
lowerCamelCase_ ={}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__snake_case ):
with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCamelCase_ =f.read()
else:
lowerCamelCase_ =''''''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py as the main '''
'''__init__ has new objects.''' )
with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'''The main __init__ has objects that are not present in '''
F'''diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py. Run `make fix-copies` '''
'''to fix this.''' )
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
a_ : Tuple = parser.parse_args()
check_dummies(args.fix_and_overwrite)
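    # For reference, a sketch of one rendered dummy object (the template is
    # re-stated inline here and mirrors the DUMMY_CLASS string declared above):
    _dummy_class_demo = (
        "class {0}(metaclass=DummyObject):\n"
        "    _backends = {1}\n\n"
        "    def __init__(self, *args, **kwargs):\n"
        "        requires_backends(self, {1})\n"
    )
    print(_dummy_class_demo.format("UNet2DModel", '["torch"]'))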
| 6 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a_ : Union[str, Any] = logging.get_logger(__name__)
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[Any] =['pixel_values']
def __init__( self, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = PILImageResampling.BILINEAR, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = 1 / 255, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
lowerCamelCase_ =size if size is not None else {'''shortest_edge''': 256}
lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase )
lowerCamelCase_ =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowerCamelCase_ =get_size_dict(lowerCAmelCase )
lowerCamelCase_ =do_resize
lowerCamelCase_ =size
lowerCamelCase_ =resample
lowerCamelCase_ =do_center_crop
lowerCamelCase_ =crop_size
lowerCamelCase_ =do_rescale
lowerCamelCase_ =rescale_factor
lowerCamelCase_ =do_normalize
lowerCamelCase_ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase_ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = PILImageResampling.BICUBIC, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowerCamelCase_ =get_resize_output_image_size(lowerCAmelCase, size=size['''shortest_edge'''], default_to_square=lowerCAmelCase )
return resize(lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =get_size_dict(lowerCAmelCase )
return center_crop(lowerCAmelCase, size=(size['''height'''], size['''width''']), data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase ):
"""simple docstring"""
return rescale(lowerCAmelCase, scale=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
return normalize(lowerCAmelCase, mean=lowerCAmelCase, std=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = ChannelDimension.FIRST, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ =size if size is not None else self.size
lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase )
lowerCamelCase_ =resample if resample is not None else self.resample
lowerCamelCase_ =do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ =crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ =get_size_dict(lowerCAmelCase )
lowerCamelCase_ =do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ =do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ =image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ =image_std if image_std is not None else self.image_std
lowerCamelCase_ =make_list_of_images(lowerCAmelCase )
if not valid_images(lowerCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowerCamelCase_ =[to_numpy_array(lowerCAmelCase ) for image in images]
if do_resize:
lowerCamelCase_ =[self.resize(image=lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase ) for image in images]
if do_center_crop:
lowerCamelCase_ =[self.center_crop(image=lowerCAmelCase, size=lowerCAmelCase ) for image in images]
if do_rescale:
lowerCamelCase_ =[self.rescale(image=lowerCAmelCase, scale=lowerCAmelCase ) for image in images]
if do_normalize:
lowerCamelCase_ =[self.normalize(image=lowerCAmelCase, mean=lowerCAmelCase, std=lowerCAmelCase ) for image in images]
lowerCamelCase_ =[to_channel_dimension_format(lowerCAmelCase, lowerCAmelCase ) for image in images]
lowerCamelCase_ ={'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase, tensor_type=lowerCAmelCase )
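# Arithmetic sketch of the default shortest-edge resize performed above (it mirrors
# get_resize_output_image_size with default_to_square=False, up to rounding details):
def _shortest_edge_resize_sketch(height: int, width: int, shortest_edge: int = 256):
    short, long = min(height, width), max(height, width)
    new_short, new_long = shortest_edge, int(shortest_edge * long / short)
    return (new_short, new_long) if height <= width else (new_long, new_short)

assert _shortest_edge_resize_sketch(480, 640) == (256, 341)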
| 6 |
'''simple docstring'''
a_ : List[Any] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def a_ ( __snake_case : int ) -> int:
"""simple docstring"""
lowerCamelCase_ =0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
a_ : list[bool | None] = [None] * 10_00_00_00
a_ : List[Any] = True
a_ : Optional[Any] = False
def a_ ( __snake_case : int ) -> bool:
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowerCamelCase_ =chain(next_number(__snake_case ) )
lowerCamelCase_ =number_chain
while number < 1000_0000:
lowerCamelCase_ =number_chain
number *= 10
return number_chain
def a_ ( __snake_case : int = 1000_0000 ) -> int:
"""simple docstring"""
for i in range(1 , __snake_case ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
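    # Worked example of the two terminal cycles (standalone sketch, independent of
    # the memoized CHAINS machinery above): 44 -> 32 -> 13 -> 10 -> 1 and 85 -> 89.
    def _chain_end(n: int) -> int:
        while n not in (1, 89):
            n = sum(int(d) ** 2 for d in str(n))
        return n

    assert _chain_end(44) == 1
    assert _chain_end(85) == 89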
| 6 | 1 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def a_ ( __snake_case : Union[str, Any] , __snake_case : Optional[int]=0.9_9_9 , __snake_case : List[str]="cosine" , ) -> int:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__snake_case : Tuple ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__snake_case : Any ):
return math.exp(t * -1_2.0 )
else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
lowerCamelCase_ =[]
for i in range(__snake_case ):
lowerCamelCase_ =i / num_diffusion_timesteps
lowerCamelCase_ =(i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__snake_case ) / alpha_bar_fn(__snake_case ) , __snake_case ) )
return torch.tensor(__snake_case , dtype=torch.floataa )
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
lowercase : List[Any] =[e.name for e in KarrasDiffusionSchedulers]
lowercase : str =2
@register_to_config
def __init__( self, lowerCAmelCase = 1_000, lowerCAmelCase = 0.0_0_0_8_5, lowerCAmelCase = 0.0_1_2, lowerCAmelCase = "linear", lowerCAmelCase = None, lowerCAmelCase = "epsilon", lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = 1.0, lowerCAmelCase = "linspace", lowerCAmelCase = 0, ):
"""simple docstring"""
if trained_betas is not None:
lowerCamelCase_ =torch.tensor(lowerCAmelCase, dtype=torch.floataa )
elif beta_schedule == "linear":
lowerCamelCase_ =torch.linspace(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCamelCase_ =(
torch.linspace(beta_start**0.5, beta_end**0.5, lowerCAmelCase, dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCamelCase_ =betas_for_alpha_bar(lowerCAmelCase, alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
lowerCamelCase_ =betas_for_alpha_bar(lowerCAmelCase, alpha_transform_type='''exp''' )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
lowerCamelCase_ =1.0 - self.betas
lowerCamelCase_ =torch.cumprod(self.alphas, dim=0 )
# set all values
self.set_timesteps(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =use_karras_sigmas
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
"""simple docstring"""
if schedule_timesteps is None:
lowerCamelCase_ =self.timesteps
lowerCamelCase_ =(schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowerCamelCase_ =1 if len(lowerCAmelCase ) > 1 else 0
else:
lowerCamelCase_ =timestep.cpu().item() if torch.is_tensor(lowerCAmelCase ) else timestep
lowerCamelCase_ =self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowercase__ ( self ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.index_for_timestep(lowerCAmelCase )
lowerCamelCase_ =self.sigmas[step_index]
lowerCamelCase_ =sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, ):
"""simple docstring"""
lowerCamelCase_ =num_inference_steps
lowerCamelCase_ =num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowerCamelCase_ =np.linspace(0, num_train_timesteps - 1, lowerCAmelCase, dtype=lowerCAmelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowerCamelCase_ =num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCamelCase_ =(np.arange(0, lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(lowerCAmelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowerCamelCase_ =num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCamelCase_ =(np.arange(lowerCAmelCase, 0, -step_ratio )).round().copy().astype(lowerCAmelCase )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
lowerCamelCase_ =np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowerCamelCase_ =np.log(lowerCAmelCase )
lowerCamelCase_ =np.interp(lowerCAmelCase, np.arange(0, len(lowerCAmelCase ) ), lowerCAmelCase )
if self.config.use_karras_sigmas:
lowerCamelCase_ =self._convert_to_karras(in_sigmas=lowerCAmelCase, num_inference_steps=self.num_inference_steps )
lowerCamelCase_ =np.array([self._sigma_to_t(lowerCAmelCase, lowerCAmelCase ) for sigma in sigmas] )
lowerCamelCase_ =np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
lowerCamelCase_ =torch.from_numpy(lowerCAmelCase ).to(device=lowerCAmelCase )
lowerCamelCase_ =torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
lowerCamelCase_ =torch.from_numpy(lowerCAmelCase )
lowerCamelCase_ =torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(lowerCAmelCase ).startswith('''mps''' ):
# mps does not support float64
lowerCamelCase_ =timesteps.to(lowerCAmelCase, dtype=torch.floataa )
else:
lowerCamelCase_ =timesteps.to(device=lowerCAmelCase )
# empty dt and derivative
lowerCamelCase_ =None
lowerCamelCase_ =None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowerCamelCase_ =defaultdict(lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =np.log(lowerCAmelCase )
# get distribution
lowerCamelCase_ =log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
lowerCamelCase_ =np.cumsum((dists >= 0), axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
lowerCamelCase_ =low_idx + 1
lowerCamelCase_ =log_sigmas[low_idx]
lowerCamelCase_ =log_sigmas[high_idx]
# interpolate sigmas
lowerCamelCase_ =(low - log_sigma) / (low - high)
lowerCamelCase_ =np.clip(lowerCAmelCase, 0, 1 )
# transform interpolation to time range
lowerCamelCase_ =(1 - w) * low_idx + w * high_idx
lowerCamelCase_ =t.reshape(sigma.shape )
return t
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =in_sigmas[-1].item()
lowerCamelCase_ =in_sigmas[0].item()
lowerCamelCase_ =7.0 # 7.0 is the value used in the paper
lowerCamelCase_ =np.linspace(0, 1, lowerCAmelCase )
lowerCamelCase_ =sigma_min ** (1 / rho)
lowerCamelCase_ =sigma_max ** (1 / rho)
lowerCamelCase_ =(max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.dt is None
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = True, ):
"""simple docstring"""
lowerCamelCase_ =self.index_for_timestep(lowerCAmelCase )
# advance index counter by 1
lowerCamelCase_ =timestep.cpu().item() if torch.is_tensor(lowerCAmelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowerCamelCase_ =self.sigmas[step_index]
lowerCamelCase_ =self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
lowerCamelCase_ =self.sigmas[step_index - 1]
lowerCamelCase_ =self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowerCamelCase_ =0
lowerCamelCase_ =sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowerCamelCase_ =sigma_hat if self.state_in_first_order else sigma_next
lowerCamelCase_ =sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowerCamelCase_ =sigma_hat if self.state_in_first_order else sigma_next
lowerCamelCase_ =model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
lowerCamelCase_ =model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.config.clip_sample:
lowerCamelCase_ =pred_original_sample.clamp(
-self.config.clip_sample_range, self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowerCamelCase_ =(sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowerCamelCase_ =sigma_next - sigma_hat
# store for 2nd order step
lowerCamelCase_ =derivative
lowerCamelCase_ =dt
lowerCamelCase_ =sample
else:
# 2. 2nd order / Heun's method
lowerCamelCase_ =(sample - pred_original_sample) / sigma_next
lowerCamelCase_ =(self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
lowerCamelCase_ =self.dt
lowerCamelCase_ =self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(lowerCAmelCase ):
# mps does not support float64
lowerCamelCase_ =self.timesteps.to(original_samples.device, dtype=torch.floataa )
lowerCamelCase_ =timesteps.to(original_samples.device, dtype=torch.floataa )
else:
lowerCamelCase_ =self.timesteps.to(original_samples.device )
lowerCamelCase_ =timesteps.to(original_samples.device )
lowerCamelCase_ =[self.index_for_timestep(lowerCAmelCase, lowerCAmelCase ) for t in timesteps]
lowerCamelCase_ =sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowerCamelCase_ =sigma.unsqueeze(-1 )
lowerCamelCase_ =original_samples + noise * sigma
return noisy_samples
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
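# A numpy sketch of the Karras sigma schedule implemented in _convert_to_karras
# above: sigma_i = (s_max^(1/rho) + i/(n-1) * (s_min^(1/rho) - s_max^(1/rho)))^rho
def _karras_sigmas_sketch(sigma_min: float, sigma_max: float, n: int, rho: float = 7.0):
    ramp = np.linspace(0, 1, n)
    min_inv, max_inv = sigma_min ** (1 / rho), sigma_max ** (1 / rho)
    return (max_inv + ramp * (min_inv - max_inv)) ** rho

print(_karras_sigmas_sketch(0.1, 10.0, 5))  # decays from 10.0 to 0.1, densest near 0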
| 6 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def a_ ( __snake_case : Tuple ) -> str:
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class __UpperCamelCase ( lowerCamelCase__ ):
@staticmethod
def lowercase__ ( lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =parser.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''', type=lowerCAmelCase, default=lowerCAmelCase, help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''', action='''store_true''', help='''Force the model to be download even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''', action='''store_true''', help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''', )
download_parser.add_argument('''model''', type=lowerCAmelCase, help='''Name of the model to download''' )
download_parser.set_defaults(func=lowerCAmelCase )
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =model
lowerCamelCase_ =cache
lowerCamelCase_ =force
lowerCamelCase_ =trust_remote_code
def lowercase__ ( self ):
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
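# Added usage sketch: once registered on the standard `transformers-cli` parser
# (an assumption; the entry-point name is not shown above), the command is
# invoked with the flags registered above, e.g.
#
#   transformers-cli download --cache-dir /tmp/models --force bert-base-uncased
#
# which constructs the command and executes the final method above, downloading
# both the model and its tokenizer into the cache directory.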
| 6 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : int =['image_processor', 'tokenizer']
lowercase : int ='LayoutLMv2ImageProcessor'
lowercase : Any =('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', lowerCAmelCase, )
lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase, lowerCAmelCase )
def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = True, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=lowerCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =[text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCamelCase_ =features['''words''']
lowerCamelCase_ =self.tokenizer(
text=text if text is not None else features['''words'''], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['''boxes'''], word_labels=lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, stride=lowerCAmelCase, pad_to_multiple_of=lowerCAmelCase, return_token_type_ids=lowerCAmelCase, return_attention_mask=lowerCAmelCase, return_overflowing_tokens=lowerCAmelCase, return_special_tokens_mask=lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, return_length=lowerCAmelCase, verbose=lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase, )
# add pixel values
lowerCamelCase_ =features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
lowerCamelCase_ =self.get_overflowing_images(lowerCAmelCase, encoded_inputs['''overflow_to_sample_mapping'''] )
lowerCamelCase_ =images
return encoded_inputs
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[]
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(lowerCAmelCase )} and {len(lowerCAmelCase )}''' )
return images_with_overflow
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, )
return self.image_processor_class
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, )
return self.image_processor
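# Added usage sketch (the public, unmangled names below are an assumption). The
# processor couples OCR from the image processor with the LayoutXLM tokenizer:
#
#   from transformers import LayoutXLMProcessor
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(image, return_tensors="pt")
#   # encoding carries "input_ids", "bbox", "attention_mask", "image",
#   # matching the model_input_names property above.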
| 6 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
a_ : List[str] = logging.get_logger(__name__)
a_ : Optional[Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
a_ : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
lowercase : str =field(
default=lowerCamelCase__ , metadata={'help': 'Model type selected in the list: ' + ', '.join(lowerCamelCase__ )} )
lowercase : str =field(
default=lowerCamelCase__ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowercase : int =field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase : int =field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowercase : int =field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowercase : int =field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowercase : float =field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowercase : int =field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowercase : int =field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowercase : int =field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[Any] ='train'
lowercase : Any ='dev'
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : SquadDataTrainingArguments
lowercase : List[SquadFeatures]
lowercase : Split
lowercase : bool
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = Split.train, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = "pt", ):
"""simple docstring"""
lowerCamelCase_ =args
lowerCamelCase_ =is_language_sensitive
        lowerCamelCase_ =SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(lowerCAmelCase, lowerCAmelCase ):
try:
lowerCamelCase_ =Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
lowerCamelCase_ =mode
# Load data features from cache or dataset file
lowerCamelCase_ ='''v2''' if args.version_2_with_negative else '''v1'''
lowerCamelCase_ =os.path.join(
cache_dir if cache_dir is not None else args.data_dir, f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ =cached_features_file + '''.lock'''
with FileLock(lowerCAmelCase ):
if os.path.exists(lowerCAmelCase ) and not args.overwrite_cache:
lowerCamelCase_ =time.time()
lowerCamelCase_ =torch.load(lowerCAmelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ =self.old_features['''features''']
lowerCamelCase_ =self.old_features.get('''dataset''', lowerCAmelCase )
lowerCamelCase_ =self.old_features.get('''examples''', lowerCAmelCase )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
''' future run''' )
else:
if mode == Split.dev:
lowerCamelCase_ =self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ =self.processor.get_train_examples(args.data_dir )
lowerCamelCase_, lowerCamelCase_ =squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowerCAmelCase, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowerCAmelCase, )
lowerCamelCase_ =time.time()
torch.save(
{'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples}, lowerCAmelCase, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.features[i]
lowerCamelCase_ =torch.tensor(feature.input_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.attention_mask, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.token_type_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.cls_index, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.p_mask, dtype=torch.float )
lowerCamelCase_ =torch.tensor(feature.is_impossible, dtype=torch.float )
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''attention_mask''': attention_mask,
'''token_type_ids''': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'''is_impossible''': is_impossible} )
if self.is_language_sensitive:
inputs.update({'''langs''': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ =torch.tensor(feature.start_position, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
return inputs
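# Added usage sketch (hypothetical unmangled names):
#
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   train_dataset = SquadDataset(args, tokenizer=tokenizer, mode=Split.train)
#   features = train_dataset[0]   # dict of tensors: input_ids, attention_mask, ...
#
# Thanks to the FileLock/caching block above, the expensive
# `squad_convert_examples_to_features` conversion only runs on the first pass;
# later runs load the cached features, dataset and examples from disk.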
| 6 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self, lowerCAmelCase, lowerCAmelCase=7, lowerCAmelCase=3, lowerCAmelCase=18, lowerCAmelCase=30, lowerCAmelCase=400, lowerCAmelCase=True, lowerCAmelCase=None, lowerCAmelCase=True, lowerCAmelCase=None, lowerCAmelCase=True, lowerCAmelCase=[0.5, 0.5, 0.5], lowerCAmelCase=[0.5, 0.5, 0.5], ):
"""simple docstring"""
lowerCamelCase_ =size if size is not None else {'''shortest_edge''': 18}
lowerCamelCase_ =crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =num_channels
lowerCamelCase_ =image_size
lowerCamelCase_ =min_resolution
lowerCamelCase_ =max_resolution
lowerCamelCase_ =do_resize
lowerCamelCase_ =size
lowerCamelCase_ =do_center_crop
lowerCamelCase_ =crop_size
lowerCamelCase_ =do_normalize
lowerCamelCase_ =image_mean
lowerCamelCase_ =image_std
def lowercase__ ( self ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
lowercase : Union[str, Any] =LevitImageProcessor if is_vision_available() else None
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =LevitImageProcessingTester(self )
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase, '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase, '''image_std''' ) )
self.assertTrue(hasattr(lowerCAmelCase, '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase, '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase, '''size''' ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
lowerCamelCase_ =self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase, Image.Image )
# Test not batched input
lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase, numpify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase, np.ndarray )
# Test not batched input
lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase, torchify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase, torch.Tensor )
# Test not batched input
lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
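# Added note: the three tests above exercise the same resize/center-crop path
# for PIL, numpy and torch inputs. Direct use of the real class looks like:
#
#   processor = LevitImageProcessor(size={"shortest_edge": 18})
#   pixel_values = processor(images, return_tensors="pt").pixel_values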
| 6 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : Any = logging.get_logger(__name__)
a_ : Optional[int] = """https://openaipublic.azureedge.net/jukebox/models/"""
a_ : Any = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def a_ ( __snake_case : int ) -> Any:
"""simple docstring"""
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
lowerCamelCase_ =key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
lowerCamelCase_ =key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
lowerCamelCase_ =key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
lowerCamelCase_ =key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
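# Added illustration: sample renames produced by the function above (called as
# `replace_key` by the conversion loop below):
#   "vqvae.bottom.k"     -> "vqvae.bottom.codebook"
#   "foo.prime_state_ln" -> "foo.encoder.final_layer_norm"
#   "cond.model._ln"     -> "cond.model._layer_norm"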
def a_ ( __snake_case : Dict , __snake_case : int , __snake_case : Dict , __snake_case : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ ={}
import re
lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_conv_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] )
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_encoder_block_conv_in.sub(__snake_case , __snake_case )
elif re_encoder_block_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] )
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_encoder_block_resnet.sub(__snake_case , __snake_case )
elif re_encoder_block_proj_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_proj_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
lowerCamelCase_ =re_encoder_block_proj_out.sub(__snake_case , __snake_case )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_conv_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_decoder_block_conv_out.sub(__snake_case , __snake_case )
elif re_decoder_block_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_decoder_block_resnet.sub(__snake_case , __snake_case )
elif re_decoder_block_proj_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_proj_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
lowerCamelCase_ =re_decoder_block_proj_in.sub(__snake_case , __snake_case )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_conv_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_prior_cond_conv_out.sub(__snake_case , __snake_case )
elif re_prior_cond_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_prior_cond_resnet.sub(__snake_case , __snake_case )
elif re_prior_cond_proj_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_proj_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
lowerCamelCase_ =re_prior_cond_proj_in.sub(__snake_case , __snake_case )
# keep original key
else:
lowerCamelCase_ =original_key
lowerCamelCase_ =replace_key(__snake_case )
if F'''{key_prefix}.{key}''' not in model_state_dict or key is None:
print(F'''failed converting {original_key} to {key}, does not match''' )
        # handle mismatched shape
elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape:
lowerCamelCase_ =model_state_dict[F'''{key_prefix}.{key}''']
            print(F'''{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match''' )
lowerCamelCase_ =original_key
lowerCamelCase_ =original_key
lowerCamelCase_ =value
return new_dict
@torch.no_grad()
def a_ ( __snake_case : List[str]=None , __snake_case : Tuple=None ) -> Union[str, Any]:
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ):
lowerCamelCase_ =requests.get(F'''{PREFIX}{file}''' , allow_redirects=__snake_case )
os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=__snake_case )
open(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , '''wb''' ).write(r.content )
lowerCamelCase_ =MODEL_MAPPING[model_name.split('''/''' )[-1]]
lowerCamelCase_ =JukeboxConfig.from_pretrained(__snake_case )
lowerCamelCase_ =JukeboxModel(__snake_case )
lowerCamelCase_ =[]
lowerCamelCase_ ={}
for i, dict_name in enumerate(__snake_case ):
lowerCamelCase_ =torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )['''model''']
lowerCamelCase_ ={}
for k in old_dic.keys():
if k.endswith('''.b''' ):
lowerCamelCase_ =old_dic[k]
elif k.endswith('''.w''' ):
lowerCamelCase_ =old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
lowerCamelCase_ =old_dic[k]
else:
lowerCamelCase_ =old_dic[k]
lowerCamelCase_ ='''vqvae''' if i == 0 else F'''priors.{3 - i}'''
lowerCamelCase_ =fix_jukebox_keys(__snake_case , model.state_dict() , __snake_case , __snake_case )
weight_dict.append(__snake_case )
lowerCamelCase_ =weight_dict.pop(0 )
model.vqvae.load_state_dict(__snake_case )
for i in range(len(__snake_case ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
with open(F'''{pytorch_dump_folder_path}/mapping.json''' , '''w''' ) as txtfile:
json.dump(__snake_case , __snake_case )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__snake_case )
return weight_dict
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
a_ : Optional[int] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
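# Added example invocation (the script filename is hypothetical):
#
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted
#
# This downloads the checkpoints listed in MODEL_MAPPING, remaps their keys,
# loads them into JukeboxModel, and writes the converted weights plus a
# mapping.json recording every renamed key.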
| 6 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : jnp.ndarray
lowercase : jnp.ndarray
class __UpperCamelCase ( nn.Module ):
lowercase : int
lowercase : Tuple[int] =(16, 32, 96, 2_56)
lowercase : jnp.dtype =jnp.floataa
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =nn.Conv(
self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
lowerCamelCase_ =[]
for i in range(len(self.block_out_channels ) - 1 ):
lowerCamelCase_ =self.block_out_channels[i]
lowerCamelCase_ =self.block_out_channels[i + 1]
lowerCamelCase_ =nn.Conv(
lowerCAmelCase, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
blocks.append(lowerCAmelCase )
lowerCamelCase_ =nn.Conv(
lowerCAmelCase, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
blocks.append(lowerCAmelCase )
lowerCamelCase_ =blocks
lowerCamelCase_ =nn.Conv(
self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
def __call__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.conv_in(lowerCAmelCase )
lowerCamelCase_ =nn.silu(lowerCAmelCase )
for block in self.blocks:
lowerCamelCase_ =block(lowerCAmelCase )
lowerCamelCase_ =nn.silu(lowerCAmelCase )
lowerCamelCase_ =self.conv_out(lowerCAmelCase )
return embedding
@flax_register_to_config
class __UpperCamelCase ( nn.Module , lowerCamelCase__ , lowerCamelCase__ ):
lowercase : int =32
lowercase : int =4
lowercase : Tuple[str] =(
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowercase : Union[bool, Tuple[bool]] =False
lowercase : Tuple[int] =(3_20, 6_40, 12_80, 12_80)
lowercase : int =2
lowercase : Union[int, Tuple[int]] =8
lowercase : Optional[Union[int, Tuple[int]]] =None
lowercase : int =12_80
lowercase : float =0.0
lowercase : bool =False
lowercase : jnp.dtype =jnp.floataa
lowercase : bool =True
lowercase : int =0
lowercase : str ="rgb"
lowercase : Tuple[int] =(16, 32, 96, 2_56)
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =(1, self.in_channels, self.sample_size, self.sample_size)
lowerCamelCase_ =jnp.zeros(lowerCAmelCase, dtype=jnp.floataa )
lowerCamelCase_ =jnp.ones((1,), dtype=jnp.intaa )
lowerCamelCase_ =jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa )
lowerCamelCase_ =(1, 3, self.sample_size * 8, self.sample_size * 8)
lowerCamelCase_ =jnp.zeros(lowerCAmelCase, dtype=jnp.floataa )
lowerCamelCase_, lowerCamelCase_ =jax.random.split(lowerCAmelCase )
lowerCamelCase_ ={'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )["params"]
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.block_out_channels
lowerCamelCase_ =block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCamelCase_ =self.num_attention_heads or self.attention_head_dim
# input
lowerCamelCase_ =nn.Conv(
block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
# time
lowerCamelCase_ =FlaxTimesteps(
block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
lowerCamelCase_ =FlaxTimestepEmbedding(lowerCAmelCase, dtype=self.dtype )
lowerCamelCase_ =FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
lowerCamelCase_ =self.only_cross_attention
if isinstance(lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =(only_cross_attention,) * len(self.down_block_types )
if isinstance(lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =(num_attention_heads,) * len(self.down_block_types )
# down
lowerCamelCase_ =[]
lowerCamelCase_ =[]
lowerCamelCase_ =block_out_channels[0]
lowerCamelCase_ =nn.Conv(
lowerCAmelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(lowerCAmelCase )
for i, down_block_type in enumerate(self.down_block_types ):
lowerCamelCase_ =output_channel
lowerCamelCase_ =block_out_channels[i]
lowerCamelCase_ =i == len(lowerCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCamelCase_ =FlaxCrossAttnDownBlockaD(
in_channels=lowerCAmelCase, out_channels=lowerCAmelCase, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
else:
lowerCamelCase_ =FlaxDownBlockaD(
in_channels=lowerCAmelCase, out_channels=lowerCAmelCase, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
down_blocks.append(lowerCAmelCase )
for _ in range(self.layers_per_block ):
lowerCamelCase_ =nn.Conv(
lowerCAmelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(lowerCAmelCase )
if not is_final_block:
lowerCamelCase_ =nn.Conv(
lowerCAmelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(lowerCAmelCase )
lowerCamelCase_ =down_blocks
lowerCamelCase_ =controlnet_down_blocks
# mid
lowerCamelCase_ =block_out_channels[-1]
lowerCamelCase_ =FlaxUNetMidBlockaDCrossAttn(
in_channels=lowerCAmelCase, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
lowerCamelCase_ =nn.Conv(
lowerCAmelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
def __call__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 1.0, lowerCAmelCase = True, lowerCAmelCase = False, ):
"""simple docstring"""
lowerCamelCase_ =self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowerCamelCase_ =jnp.flip(lowerCAmelCase, axis=1 )
# 1. time
if not isinstance(lowerCAmelCase, jnp.ndarray ):
lowerCamelCase_ =jnp.array([timesteps], dtype=jnp.intaa )
elif isinstance(lowerCAmelCase, jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCamelCase_ =timesteps.astype(dtype=jnp.floataa )
lowerCamelCase_ =jnp.expand_dims(lowerCAmelCase, 0 )
lowerCamelCase_ =self.time_proj(lowerCAmelCase )
lowerCamelCase_ =self.time_embedding(lowerCAmelCase )
# 2. pre-process
lowerCamelCase_ =jnp.transpose(lowerCAmelCase, (0, 2, 3, 1) )
lowerCamelCase_ =self.conv_in(lowerCAmelCase )
lowerCamelCase_ =jnp.transpose(lowerCAmelCase, (0, 2, 3, 1) )
lowerCamelCase_ =self.controlnet_cond_embedding(lowerCAmelCase )
sample += controlnet_cond
# 3. down
lowerCamelCase_ =(sample,)
for down_block in self.down_blocks:
if isinstance(lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_, lowerCamelCase_ =down_block(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, deterministic=not train )
else:
lowerCamelCase_, lowerCamelCase_ =down_block(lowerCAmelCase, lowerCAmelCase, deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowerCamelCase_ =self.mid_block(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, deterministic=not train )
        # 5. controlnet blocks
lowerCamelCase_ =()
for down_block_res_sample, controlnet_block in zip(lowerCAmelCase, self.controlnet_down_blocks ):
lowerCamelCase_ =controlnet_block(lowerCAmelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowerCamelCase_ =controlnet_down_block_res_samples
lowerCamelCase_ =self.controlnet_mid_block(lowerCAmelCase )
# 6. scaling
lowerCamelCase_ =[sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowerCAmelCase, mid_block_res_sample=lowerCAmelCase )
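# Added usage sketch (class and variable names are assumptions; the identifiers
# above are mangled):
#
#   import jax
#   controlnet = FlaxControlNetModel(sample_size=32)
#   params = controlnet.init_weights(jax.random.PRNGKey(0))
#   out = controlnet.apply(
#       {"params": params}, sample, timesteps, encoder_hidden_states,
#       controlnet_cond, conditioning_scale=1.0,
#   )
#   down_res, mid_res = out.down_block_res_samples, out.mid_block_res_sample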
| 6 |
'''simple docstring'''
def a_ ( __snake_case : int = 1000 ) -> int:
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =1, 1
lowerCamelCase_ =2
while True:
lowerCamelCase_ =0
lowerCamelCase_ =fa + fa
lowerCamelCase_, lowerCamelCase_ =fa, f
index += 1
        for _ in str(f ):
            i += 1
        if i == n:
            break
return index
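# Added: a compact, self-contained restatement of the same search, useful as a
# cross-check of the loop above.
def _first_fib_index_with_n_digits(n: int = 1000) -> int:
    fa, fb, index = 1, 1, 2
    while len(str(fb)) < n:
        fa, fb = fb, fa + fb
        index += 1
    return index
# e.g. _first_fib_index_with_n_digits(3) == 12, since F(12) = 144 is the first
# 3-digit Fibonacci number.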
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 6 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
lowercase : Union[str, Any] =ShapEImgaImgPipeline
lowercase : Dict =['image']
lowercase : str =['image']
lowercase : int =[
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
lowercase : int =False
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase__ ( self ):
"""simple docstring"""
return 8
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
lowerCamelCase_ =CLIPVisionModel(lowerCAmelCase )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =CLIPImageProcessor(
crop_size=224, do_center_crop=lowerCAmelCase, do_normalize=lowerCAmelCase, do_resize=lowerCAmelCase, image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], resample=3, size=224, )
return image_processor
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowerCamelCase_ =PriorTransformer(**lowerCAmelCase )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowerCamelCase_ =ShapERenderer(**lowerCAmelCase )
return model
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.dummy_prior
lowerCamelCase_ =self.dummy_image_encoder
lowerCamelCase_ =self.dummy_image_processor
lowerCamelCase_ =self.dummy_renderer
lowerCamelCase_ =HeunDiscreteScheduler(
beta_schedule='''exp''', num_train_timesteps=1_024, prediction_type='''sample''', use_karras_sigmas=lowerCAmelCase, clip_sample=lowerCAmelCase, clip_sample_range=1.0, )
lowerCamelCase_ ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ):
"""simple docstring"""
lowerCamelCase_ =floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
if str(lowerCAmelCase ).startswith('''mps''' ):
lowerCamelCase_ =torch.manual_seed(lowerCAmelCase )
else:
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
lowerCamelCase_ ={
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu'''
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =pipe(**self.get_dummy_inputs(lowerCAmelCase ) )
lowerCamelCase_ =output.images[0]
lowerCamelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowerCamelCase_ =np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =torch_device == '''cpu'''
lowerCamelCase_ =True
self._test_inference_batch_single_identical(
batch_size=2, test_max_difference=lowerCAmelCase, relax_max_difference=lowerCAmelCase, )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =1
lowerCamelCase_ =2
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
lowerCamelCase_ =batch_size * [inputs[key]]
lowerCamelCase_ =pipe(**lowerCAmelCase, num_images_per_prompt=lowerCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowerCamelCase_ =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowerCamelCase_ =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
lowerCamelCase_ =pipe(
lowerCAmelCase, generator=lowerCAmelCase, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type='''np''', ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCAmelCase, lowerCAmelCase )
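# Added usage sketch, mirroring the slow test above:
#
#   pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img")
#   images = pipe(image, guidance_scale=3.0, num_inference_steps=64,
#                 frame_size=64, output_type="np").images   # shape (20, 64, 64, 3)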
| 6 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def a_ ( __snake_case : Any ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
F'''{test_file} instead.''' )
lowerCamelCase_ =components[-1]
if not test_fn.endswith('''py''' ):
raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' )
if not test_fn.startswith('''test_modeling_''' ):
raise ValueError(
F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
lowerCamelCase_ =components[:-1] + [test_fn.replace('''.py''' , '''''' )]
lowerCamelCase_ ='''.'''.join(__snake_case )
return test_module_path
def a_ ( __snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =get_module_path(__snake_case )
lowerCamelCase_ =importlib.import_module(__snake_case )
return test_module
def a_ ( __snake_case : Dict ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =get_test_module(__snake_case )
for attr in dir(__snake_case ):
if attr.endswith('''ModelTester''' ):
tester_classes.append(getattr(__snake_case , __snake_case ) )
# sort with class names
    return sorted(__snake_case , key=lambda x : x.__name__ )
def a_ ( __snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =get_test_module(__snake_case )
for attr in dir(__snake_case ):
lowerCamelCase_ =getattr(__snake_case , __snake_case )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
lowerCamelCase_ =getattr(__snake_case , '''all_model_classes''' , [] )
if len(__snake_case ) > 0:
test_classes.append(__snake_case )
# sort with class names
    return sorted(__snake_case , key=lambda x : x.__name__ )
def a_ ( __snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ =get_test_classes(__snake_case )
lowerCamelCase_ =set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
    return sorted(__snake_case , key=lambda x : x.__name__ )
def a_ ( __snake_case : str ) -> str:
"""simple docstring"""
lowerCamelCase_ =test_class()
if hasattr(__snake_case , '''setUp''' ):
test.setUp()
lowerCamelCase_ =None
if hasattr(__snake_case , '''model_tester''' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
lowerCamelCase_ =test.model_tester.__class__
return model_tester
def a_ ( __snake_case : Dict , __snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =get_test_classes(__snake_case )
lowerCamelCase_ =[]
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(__snake_case )
# sort with class names
    return sorted(__snake_case , key=lambda x : x.__name__ )
def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ =get_test_classes_for_model(__snake_case , __snake_case )
lowerCamelCase_ =[]
for test_class in test_classes:
lowerCamelCase_ =get_model_tester_from_test_class(__snake_case )
if tester_class is not None:
tester_classes.append(__snake_case )
# sort with class names
    return sorted(__snake_case , key=lambda x : x.__name__ )
def a_ ( __snake_case : Tuple ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =get_test_classes(__snake_case )
lowerCamelCase_ ={test_class: get_model_tester_from_test_class(__snake_case ) for test_class in test_classes}
return test_tester_mapping
def a_ ( __snake_case : Dict ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =get_model_classes(__snake_case )
lowerCamelCase_ ={
model_class: get_test_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes
}
return model_test_mapping
def a_ ( __snake_case : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =get_model_classes(__snake_case )
lowerCamelCase_ ={
model_class: get_tester_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes
}
return model_to_tester_mapping
def a_ ( __snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
    if isinstance(__snake_case , str ):
        return o
    elif isinstance(__snake_case , type ):
        return o.__name__
    elif isinstance(__snake_case , (list, tuple) ):
        return [to_json(x ) for x in o]
    elif isinstance(__snake_case , dict ):
        return {to_json(k ): to_json(v ) for k, v in o.items()}
else:
return o
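# Added illustration: `to_json` makes the class-to-class mappings built above
# JSON-serializable by collapsing classes into their names, e.g. with
# hypothetical classes:
#   to_json({BertModelTest: [BertModel]}) -> {"BertModelTest": ["BertModel"]}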
| 6 | 1 |
'''simple docstring'''
import unittest
import numpy as np
def a_ ( __snake_case : np.ndarray , __snake_case : np.ndarray , __snake_case : np.ndarray , __snake_case : np.ndarray | None = None , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ =np.shape(__snake_case )
lowerCamelCase_ =np.shape(__snake_case )
lowerCamelCase_ =np.shape(__snake_case )
if shape_a[0] != shape_b[0]:
lowerCamelCase_ =(
'''Expected the same number of rows for A and B. '''
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(__snake_case )
if shape_b[1] != shape_c[1]:
lowerCamelCase_ =(
'''Expected the same number of columns for B and C. '''
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(__snake_case )
lowerCamelCase_ =pseudo_inv
if a_inv is None:
try:
lowerCamelCase_ =np.linalg.inv(__snake_case )
except np.linalg.LinAlgError:
raise ValueError(
'''Input matrix A is not invertible. Cannot compute Schur complement.''' )
return mat_c - mat_b.T @ a_inv @ mat_b
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCamelCase_ =np.array([[0, 3], [3, 0], [2, 3]] )
lowerCamelCase_ =np.array([[2, 1], [6, 3]] )
lowerCamelCase_ =schur_complement(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =np.block([[a, b], [b.T, c]] )
lowerCamelCase_ =np.linalg.det(lowerCAmelCase )
lowerCamelCase_ =np.linalg.det(lowerCAmelCase )
lowerCamelCase_ =np.linalg.det(lowerCAmelCase )
self.assertAlmostEqual(lowerCAmelCase, det_a * det_s )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCamelCase_ =np.array([[0, 3], [3, 0], [2, 3]] )
lowerCamelCase_ =np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowerCAmelCase ):
schur_complement(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCamelCase_ =np.array([[0, 3], [3, 0], [2, 3]] )
lowerCamelCase_ =np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowerCAmelCase ):
schur_complement(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
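# Added background note: the determinant identity exercised by the first test is
#   det([[A, B], [B^T, C]]) = det(A) * det(C - B^T A^{-1} B)
# where C - B^T A^{-1} B is exactly the Schur complement computed above.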
| 6 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : str =['speech']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''speech'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Any =['speech']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''speech'''] )
| 6 | 1 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
a_ : List[str] = logging.get_logger(__name__)
a_ : Optional[Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
a_ : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
lowercase : str =field(
default=lowerCamelCase__ , metadata={'help': 'Model type selected in the list: ' + ', '.join(lowerCamelCase__ )} )
lowercase : str =field(
default=lowerCamelCase__ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowercase : int =field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase : int =field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowercase : int =field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowercase : int =field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowercase : float =field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowercase : int =field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowercase : int =field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowercase : int =field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[Any] ='train'
lowercase : Any ='dev'
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : SquadDataTrainingArguments
lowercase : List[SquadFeatures]
lowercase : Split
lowercase : bool
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = Split.train, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = "pt", ):
"""simple docstring"""
lowerCamelCase_ =args
lowerCamelCase_ =is_language_sensitive
lowerCamelCase_ =SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(lowerCAmelCase, lowerCAmelCase ):
try:
lowerCamelCase_ =Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
lowerCamelCase_ =mode
# Load data features from cache or dataset file
lowerCamelCase_ ='''v2''' if args.version_2_with_negative else '''v1'''
lowerCamelCase_ =os.path.join(
cache_dir if cache_dir is not None else args.data_dir, f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ =cached_features_file + '''.lock'''
with FileLock(lowerCAmelCase ):
if os.path.exists(lowerCAmelCase ) and not args.overwrite_cache:
lowerCamelCase_ =time.time()
lowerCamelCase_ =torch.load(lowerCAmelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ =self.old_features['''features''']
lowerCamelCase_ =self.old_features.get('''dataset''', lowerCAmelCase )
lowerCamelCase_ =self.old_features.get('''examples''', lowerCAmelCase )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
                        f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
                        ''' a future run''' )
else:
if mode == Split.dev:
lowerCamelCase_ =self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ =self.processor.get_train_examples(args.data_dir )
lowerCamelCase_, lowerCamelCase_ =squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowerCAmelCase, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowerCAmelCase, )
lowerCamelCase_ =time.time()
torch.save(
{'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples}, lowerCAmelCase, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.features[i]
lowerCamelCase_ =torch.tensor(feature.input_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.attention_mask, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.token_type_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.cls_index, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.p_mask, dtype=torch.float )
lowerCamelCase_ =torch.tensor(feature.is_impossible, dtype=torch.float )
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''attention_mask''': attention_mask,
'''token_type_ids''': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'''is_impossible''': is_impossible} )
if self.is_language_sensitive:
            inputs.update({'''langs''': (torch.ones(input_ids.shape, dtype=torch.int64 ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ =torch.tensor(feature.start_position, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
return inputs
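# Hypothetical usage sketch (class and argument names are assumed for illustration;
# the identifiers in this file are placeholder-mangled): a trainer would build the
# dataset once and index it per feature, e.g.
#   dataset = SquadDataset(args, tokenizer, mode=Split.train)
#   batch = dataset[0]  # dict of input_ids / attention_mask / token_type_ids tensors
# with start_positions / end_positions included only when mode == Split.train.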
| 6 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[str] =['image_processor', 'tokenizer']
lowercase : Optional[int] ='AutoImageProcessor'
lowercase : List[str] ='AutoTokenizer'
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', lowerCAmelCase, )
lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =self.image_processor
lowerCamelCase_ =False
def __call__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase, **lowerCAmelCase )
lowerCamelCase_ =kwargs.pop('''images''', lowerCAmelCase )
lowerCamelCase_ =kwargs.pop('''text''', lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
lowerCamelCase_ =args[0]
lowerCamelCase_ =args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
lowerCamelCase_ =self.image_processor(lowerCAmelCase, *lowerCAmelCase, **lowerCAmelCase )
if text is not None:
lowerCamelCase_ =self.tokenizer(lowerCAmelCase, **lowerCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCamelCase_ =encodings['''input_ids''']
return inputs
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@contextmanager
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your images inputs, or in a separate call.)''' )
lowerCamelCase_ =True
lowerCamelCase_ =self.tokenizer
yield
lowerCamelCase_ =self.image_processor
lowerCamelCase_ =False
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False, lowerCAmelCase=None ):
"""simple docstring"""
if added_vocab is None:
lowerCamelCase_ =self.tokenizer.get_added_vocab()
lowerCamelCase_ ={}
while tokens:
lowerCamelCase_ =re.search(R'''<s_(.*?)>''', lowerCAmelCase, re.IGNORECASE )
if start_token is None:
break
lowerCamelCase_ =start_token.group(1 )
lowerCamelCase_ =re.search(Rf'''</s_{key}>''', lowerCAmelCase, re.IGNORECASE )
lowerCamelCase_ =start_token.group()
if end_token is None:
lowerCamelCase_ =tokens.replace(lowerCAmelCase, '''''' )
else:
lowerCamelCase_ =end_token.group()
lowerCamelCase_ =re.escape(lowerCAmelCase )
lowerCamelCase_ =re.escape(lowerCAmelCase )
lowerCamelCase_ =re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''', lowerCAmelCase, re.IGNORECASE )
if content is not None:
lowerCamelCase_ =content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
lowerCamelCase_ =self.tokenajson(lowerCAmelCase, is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase )
if value:
if len(lowerCAmelCase ) == 1:
lowerCamelCase_ =value[0]
lowerCamelCase_ =value
else: # leaf nodes
lowerCamelCase_ =[]
for leaf in content.split(R'''<sep/>''' ):
lowerCamelCase_ =leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowerCamelCase_ =leaf[1:-2] # for categorical special tokens
output[key].append(lowerCAmelCase )
if len(output[key] ) == 1:
lowerCamelCase_ =output[key][0]
lowerCamelCase_ =tokens[tokens.find(lowerCAmelCase ) + len(lowerCAmelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:], is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase )
if len(lowerCAmelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
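    # Illustrative parse (hypothetical tags, assuming the intended token-to-JSON
    # behavior of the method above): a sequence such as
    #   "<s_menu><s_name>latte</s_name></s_menu>"
    # is converted to {"menu": {"name": "latte"}}, with "<sep/>" separating multiple
    # leaf values and unmatched trailing text falling back to {"text_sequence": ...}.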
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, )
return self.image_processor_class
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, )
return self.image_processor
| 6 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : List[Any] ) -> int:
"""simple docstring"""
# Initialise PyTorch model
lowerCamelCase_ =FunnelConfig.from_json_file(__snake_case )
print(F'''Building PyTorch model from configuration: {config}''' )
lowerCamelCase_ =FunnelBaseModel(__snake_case ) if base_model else FunnelModel(__snake_case )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(__snake_case , __snake_case , __snake_case )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __snake_case )
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--base_model""", action="""store_true""", help="""Whether you want just the base model (no decoder) or not."""
)
a_ : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
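# Hypothetical invocation (script name and paths are placeholders; the flags match
# the argparse definitions above):
#   python convert_funnel_tf_checkpoint.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin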
| 6 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
lowercase : Union[str, Any] =ShapEImgaImgPipeline
lowercase : Dict =['image']
lowercase : str =['image']
lowercase : int =[
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
lowercase : int =False
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase__ ( self ):
"""simple docstring"""
return 8
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
lowerCamelCase_ =CLIPVisionModel(lowerCAmelCase )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =CLIPImageProcessor(
crop_size=224, do_center_crop=lowerCAmelCase, do_normalize=lowerCAmelCase, do_resize=lowerCAmelCase, image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], resample=3, size=224, )
return image_processor
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowerCamelCase_ =PriorTransformer(**lowerCAmelCase )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowerCamelCase_ =ShapERenderer(**lowerCAmelCase )
return model
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.dummy_prior
lowerCamelCase_ =self.dummy_image_encoder
lowerCamelCase_ =self.dummy_image_processor
lowerCamelCase_ =self.dummy_renderer
lowerCamelCase_ =HeunDiscreteScheduler(
beta_schedule='''exp''', num_train_timesteps=1_024, prediction_type='''sample''', use_karras_sigmas=lowerCAmelCase, clip_sample=lowerCAmelCase, clip_sample_range=1.0, )
lowerCamelCase_ ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ):
"""simple docstring"""
lowerCamelCase_ =floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
if str(lowerCAmelCase ).startswith('''mps''' ):
lowerCamelCase_ =torch.manual_seed(lowerCAmelCase )
else:
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
lowerCamelCase_ ={
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu'''
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =pipe(**self.get_dummy_inputs(lowerCAmelCase ) )
lowerCamelCase_ =output.images[0]
lowerCamelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowerCamelCase_ =np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =torch_device == '''cpu'''
lowerCamelCase_ =True
self._test_inference_batch_single_identical(
batch_size=2, test_max_difference=lowerCAmelCase, relax_max_difference=lowerCAmelCase, )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =1
lowerCamelCase_ =2
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
lowerCamelCase_ =batch_size * [inputs[key]]
lowerCamelCase_ =pipe(**lowerCAmelCase, num_images_per_prompt=lowerCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowerCamelCase_ =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowerCamelCase_ =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
lowerCamelCase_ =pipe(
lowerCAmelCase, generator=lowerCAmelCase, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type='''np''', ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCAmelCase, lowerCAmelCase )
| 6 | 1 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : Any = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
a_ : Union[str, Any] = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
a_ : str = """</w>"""
a_ : Optional[int] = """@@ """
def a_ ( __snake_case : Dict ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =set()
lowerCamelCase_ =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase_ =char
return pairs
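# Illustrative example (hypothetical input): get_pairs over "hello" yields
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')} -- the set of adjacent symbol
# pairs that the BPE merge loop in the tokenizer below ranks and collapses.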
# Speech2Text2 has no max input length
a_ : Any = {"""facebook/s2t-wav2vec2-large-en-de""": 10_24}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Any =VOCAB_FILES_NAMES
lowercase : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
lowercase : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Tuple =['input_ids', 'attention_mask']
def __init__( self, lowerCAmelCase, lowerCAmelCase="<s>", lowerCAmelCase="<pad>", lowerCAmelCase="</s>", lowerCAmelCase="<unk>", lowerCAmelCase=False, lowerCAmelCase=None, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(
unk_token=lowerCAmelCase, bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, pad_token=lowerCAmelCase, do_lower_case=lowerCAmelCase, **lowerCAmelCase, )
lowerCamelCase_ =do_lower_case
with open(lowerCAmelCase, encoding='''utf-8''' ) as vocab_handle:
lowerCamelCase_ =json.load(lowerCAmelCase )
lowerCamelCase_ ={v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
lowerCamelCase_ =None
lowerCamelCase_ =None
else:
with open(lowerCAmelCase, encoding='''utf-8''' ) as merges_handle:
lowerCamelCase_ =merges_handle.read().split('''\n''' )[:-1]
lowerCamelCase_ =[tuple(merge.split()[:2] ) for merge in merges]
lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) )
lowerCamelCase_ ={}
@property
def lowercase__ ( self ):
"""simple docstring"""
return len(self.decoder )
def lowercase__ ( self ):
"""simple docstring"""
return dict(self.encoder, **self.added_tokens_encoder )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
lowerCamelCase_ =get_pairs(lowerCAmelCase )
if not pairs:
return token
while True:
lowerCamelCase_ =min(lowerCAmelCase, key=lambda lowerCAmelCase : self.bpe_ranks.get(lowerCAmelCase, float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_, lowerCamelCase_ =bigram
lowerCamelCase_ =[]
lowerCamelCase_ =0
while i < len(lowerCAmelCase ):
try:
lowerCamelCase_ =word.index(lowerCAmelCase, lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ =j
if word[i] == first and i < len(lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ =tuple(lowerCAmelCase )
lowerCamelCase_ =new_word
if len(lowerCAmelCase ) == 1:
break
else:
lowerCamelCase_ =get_pairs(lowerCAmelCase )
lowerCamelCase_ =''' '''.join(lowerCAmelCase )
if word == "\n " + BPE_TOKEN_MERGES:
lowerCamelCase_ ='''\n''' + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES ):
            lowerCamelCase_ =word.replace(BPE_TOKEN_MERGES, '''''' )
        lowerCamelCase_ =word.replace(''' ''', BPE_TOKEN_VOCAB )
lowerCamelCase_ =word
return word
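    # Illustrative BPE walk-through (hypothetical merge ranks): with merges
    # [('l', 'o'), ('lo', 'w</w>')], the token "low" is first expanded to
    # ('l', 'o', 'w</w>'), merged to ('lo', 'w</w>'), then to ('low</w>',);
    # the suffix handling above strips the '</w>' end-of-word marker and maps any
    # remaining internal spaces to the '@@ ' continuation prefix.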
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
if self.bpe_ranks is None:
raise ValueError(
'''This tokenizer was instantiated without a `merges.txt` file, so'''
''' that it can only be used for decoding, not for encoding.'''
                ''' Make sure to provide `merges.txt` file at instantiation to enable '''
'''encoding.''' )
if self.do_lower_case:
lowerCamelCase_ =text.lower()
lowerCamelCase_ =text.split()
lowerCamelCase_ =[]
for token in text:
if token:
split_tokens.extend(list(self.bpe(lowerCAmelCase ).split(''' ''' ) ) )
return split_tokens
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return self.encoder.get(lowerCAmelCase, self.encoder.get(self.unk_token ) )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.decoder.get(lowerCAmelCase, self.unk_token )
return result
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =''' '''.join(lowerCAmelCase )
# make sure @@ tokens are concatenated
lowerCamelCase_ =''''''.join(string.split(lowerCAmelCase ) )
return string
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ =os.path.join(
lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ =os.path.join(
lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCAmelCase, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=lowerCAmelCase, ensure_ascii=lowerCAmelCase ) + '''\n''' )
lowerCamelCase_ =0
if self.bpe_ranks is None:
return (vocab_file,)
with open(lowerCAmelCase, '''w''', encoding='''utf-8''' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda lowerCAmelCase : lowerCAmelCase[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
lowerCamelCase_ =token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
index += 1
return (vocab_file, merges_file)
| 6 |
'''simple docstring'''
from itertools import product
def a_ ( __snake_case : int , __snake_case : int ) -> list[int]:
"""simple docstring"""
lowerCamelCase_ =sides_number
lowerCamelCase_ =max_face_number * dice_number
lowerCamelCase_ =[0] * (max_total + 1)
lowerCamelCase_ =1
lowerCamelCase_ =range(__snake_case , max_face_number + 1 )
for dice_numbers in product(__snake_case , repeat=__snake_case ):
lowerCamelCase_ =sum(__snake_case )
totals_frequencies[total] += 1
return totals_frequencies
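# Sanity check (hypothetical call, using the intended parameter meanings): one
# six-sided die gives [0, 1, 1, 1, 1, 1, 1] -- index 0 is unused and each total
# from 1 to 6 occurs exactly once.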
def a_ ( ) -> float:
"""simple docstring"""
lowerCamelCase_ =total_frequency_distribution(
sides_number=4 , dice_number=9 )
lowerCamelCase_ =total_frequency_distribution(
sides_number=6 , dice_number=6 )
lowerCamelCase_ =0
lowerCamelCase_ =9
lowerCamelCase_ =4 * 9
lowerCamelCase_ =6
for peter_total in range(__snake_case , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
lowerCamelCase_ =(4**9) * (6**6)
lowerCamelCase_ =peter_wins_count / total_games_number
lowerCamelCase_ =round(__snake_case , ndigits=7 )
return rounded_peter_win_probability
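# For reference, the published answer to Project Euler problem 205 is 0.5731441,
# which the 7-digit rounding above reproduces.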
if __name__ == "__main__":
print(F"""{solution() = }""")
| 6 | 1 |
'''simple docstring'''
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def a_ ( __snake_case : Tuple , __snake_case : str , __snake_case : Any , __snake_case : Tuple ) -> Any:
"""simple docstring"""
lowerCamelCase_ =multiprocessing.Manager()
lowerCamelCase_ =manager.list()
lowerCamelCase_ =multiprocessing.Process(target=__snake_case , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
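# The helper above mirrors the OpenAI HumanEval harness: the untrusted program runs
# in a separate process, and if it outlives `timeout` the parent kills it and records
# "timed out" instead of hanging the evaluation.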
def a_ ( __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Optional[int] ) -> int:
"""simple docstring"""
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
lowerCamelCase_ =shutil.rmtree
lowerCamelCase_ =os.rmdir
lowerCamelCase_ =os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
lowerCamelCase_ ={}
with swallow_io():
with time_limit(__snake_case ):
exec(__snake_case , __snake_case )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(F'''failed: {e}''' )
# Needed for cleaning up.
lowerCamelCase_ =rmtree
lowerCamelCase_ =rmdir
lowerCamelCase_ =chdir
@contextlib.contextmanager
def a_ ( __snake_case : Optional[Any] ) -> List[str]:
"""simple docstring"""
def signal_handler(__snake_case : str , __snake_case : int ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , __snake_case )
signal.signal(signal.SIGALRM , __snake_case )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def a_ ( ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =WriteOnlyStringIO()
with contextlib.redirect_stdout(__snake_case ):
with contextlib.redirect_stderr(__snake_case ):
with redirect_stdin(__snake_case ):
yield
@contextlib.contextmanager
def a_ ( ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as dirname:
with chdir(__snake_case ):
yield dirname
class __UpperCamelCase ( lowerCamelCase__ ):
pass
class __UpperCamelCase ( io.StringIO ):
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
raise OSError
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
raise OSError
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
raise OSError
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return False
class __UpperCamelCase ( contextlib._RedirectStream ): # type: ignore
lowercase : Optional[Any] ='stdin'
@contextlib.contextmanager
def a_ ( __snake_case : List[Any] ) -> int:
"""simple docstring"""
if root == ".":
yield
return
lowerCamelCase_ =os.getcwd()
os.chdir(__snake_case )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__snake_case )
def a_ ( __snake_case : Optional[Any]=None ) -> Tuple:
"""simple docstring"""
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
lowerCamelCase_ =None
lowerCamelCase_ =None
import os
lowerCamelCase_ ='''1'''
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
import shutil
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
import subprocess
lowerCamelCase_ =None # type: ignore
lowerCamelCase_ =None
import sys
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
| 6 |
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
a_ : Tuple = TypeVar("""T""")
a_ : Dict = Union[List[T], Tuple[T, ...]]
a_ : int = Union[T, List[T], Dict[str, T]]
a_ : Optional[Any] = Union[str, bytes, os.PathLike]
| 6 | 1 |
'''simple docstring'''
import math
import os
import sys
def a_ ( __snake_case : str ) -> str:
"""simple docstring"""
lowerCamelCase_ =''''''
try:
with open(__snake_case , '''rb''' ) as binary_file:
lowerCamelCase_ =binary_file.read()
for dat in data:
lowerCamelCase_ =F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
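# Illustrative example: a file containing the single byte 0x41 ('A') is read back by
# the function above as the bit string '01000001', since each byte is formatted with
# the '{:08b}' pattern.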
def a_ ( __snake_case : dict[str, str] , __snake_case : str , __snake_case : int , __snake_case : str ) -> None:
"""simple docstring"""
lexicon.pop(__snake_case )
lowerCamelCase_ =last_match_id
    if math.log2(__snake_case ).is_integer():
for curr_key in lexicon:
lowerCamelCase_ ='''0''' + lexicon[curr_key]
lowerCamelCase_ =bin(__snake_case )[2:]
def a_ ( __snake_case : str ) -> str:
"""simple docstring"""
lowerCamelCase_ ={'''0''': '''0''', '''1''': '''1'''}
lowerCamelCase_, lowerCamelCase_ ='''''', ''''''
lowerCamelCase_ =len(__snake_case )
for i in range(len(__snake_case ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowerCamelCase_ =lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__snake_case , __snake_case , __snake_case , __snake_case )
index += 1
lowerCamelCase_ =''''''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
lowerCamelCase_ =lexicon[curr_string]
result += last_match_id
return result
def a_ ( __snake_case : str , __snake_case : str ) -> str:
"""simple docstring"""
lowerCamelCase_ =os.path.getsize(__snake_case )
lowerCamelCase_ =bin(__snake_case )[2:]
lowerCamelCase_ =len(__snake_case )
return "0" * (length_length - 1) + file_length_binary + compressed
def a_ ( __snake_case : str , __snake_case : str ) -> None:
"""simple docstring"""
lowerCamelCase_ =8
try:
with open(__snake_case , '''wb''' ) as opened_file:
lowerCamelCase_ =[
to_write[i : i + byte_length]
for i in range(0 , len(__snake_case ) , __snake_case )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__snake_case , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def a_ ( __snake_case : str , __snake_case : str ) -> None:
"""simple docstring"""
lowerCamelCase_ =read_file_binary(__snake_case )
lowerCamelCase_ =compress_data(__snake_case )
lowerCamelCase_ =add_file_length(__snake_case , __snake_case )
write_file_binary(__snake_case , __snake_case )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
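# Hypothetical command line (file names are placeholders): running
#   python lempel_ziv_compress.py input.txt output.lz
# reads input.txt as one bit string, LZ-compresses it, prepends the source-length
# header, and writes the packed bytes (padded with a 1-then-zeros marker) to output.lz.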
| 6 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def a_ ( __snake_case : str , __snake_case : bool = True , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : bool = False , __snake_case : float = 100 , __snake_case : float = 0.0_1 , __snake_case : float = 1 , ) -> Any:
"""simple docstring"""
lowerCamelCase_ =False
lowerCamelCase_ =search_prob
lowerCamelCase_ =start_temperate
lowerCamelCase_ =[]
lowerCamelCase_ =0
lowerCamelCase_ =None
while not search_end:
lowerCamelCase_ =current_state.score()
if best_state is None or current_score > best_state.score():
lowerCamelCase_ =current_state
scores.append(__snake_case )
iterations += 1
lowerCamelCase_ =None
lowerCamelCase_ =current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
lowerCamelCase_ =random.randint(0 , len(__snake_case ) - 1 ) # picking a random neighbor
lowerCamelCase_ =neighbors.pop(__snake_case )
lowerCamelCase_ =picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
lowerCamelCase_ =change * -1 # in case we are finding minimum
if change > 0: # improves the solution
lowerCamelCase_ =picked_neighbor
else:
lowerCamelCase_ =(math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
lowerCamelCase_ =picked_neighbor
lowerCamelCase_ =current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
lowerCamelCase_ =True
else:
lowerCamelCase_ =next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(__snake_case ) , __snake_case )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
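# The acceptance rule above is the classic Metropolis criterion: a move that worsens
# the score by `change` (change <= 0) is still taken with probability
# e^(change / current_temp), so hot early iterations explore broadly while cool late
# iterations behave nearly greedily.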
if __name__ == "__main__":
def a_ ( __snake_case : List[str] , __snake_case : Optional[int] ) -> str:
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ : Optional[int] = simulated_annealing(
prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ : List[str] = simulated_annealing(
prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def a_ ( __snake_case : Dict , __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return (3 * x**2) - (6 * y)
a_ : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ : Optional[Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
a_ : Dict = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ : Optional[int] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
| 6 | 1 |
'''simple docstring'''
def a_ ( __snake_case : int , __snake_case : int ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
lowerCamelCase_ =str(bin(__snake_case ) )[2:] # remove the leading "0b"
lowerCamelCase_ =str(bin(__snake_case ) )[2:] # remove the leading "0b"
lowerCamelCase_ =max(len(__snake_case ) , len(__snake_case ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(__snake_case ) , b_binary.zfill(__snake_case ) ) )
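# Worked example (values chosen for illustration): 25 is 0b11001 and 32 is 0b100000;
# zero-filled to a common width they XOR bitwise to 111001, so the function above
# returns '0b111001' for inputs (25, 32).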
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def a_ ( __snake_case : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =[
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__snake_case , __snake_case )
def a_ ( __snake_case : List[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =emb.weight.shape
lowerCamelCase_ =nn.Linear(__snake_case , __snake_case , bias=__snake_case )
lowerCamelCase_ =emb.weight.data
return lin_layer
def a_ ( __snake_case : Union[str, Any] , __snake_case : Tuple=None ) -> Dict:
"""simple docstring"""
lowerCamelCase_ ={}
for old_key in state_dict.keys():
lowerCamelCase_ =old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
lowerCamelCase_ =key.replace('''moe_layer.experts.0''' , F'''ffn.experts.expert_{expert_idx}''' )
else:
lowerCamelCase_ =key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
if "gate" in key:
lowerCamelCase_ =key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
if "fc2" and "experts" not in key:
lowerCamelCase_ =key.replace('''.fc2.''' , '''.ffn.fc2.''' )
if "fc1" and "experts" not in key:
lowerCamelCase_ =key.replace('''.fc1.''' , '''.ffn.fc1.''' )
if ".encoder_attn." in key:
lowerCamelCase_ =key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
if "encoder_attn_layer_norm" in key:
lowerCamelCase_ =key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
if "final_layer_norm" in key:
lowerCamelCase_ =key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
lowerCamelCase_ =state_dict[old_key]
return new_dict
def a_ ( __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : str = WEIGHTS_NAME ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =0
os.makedirs(__snake_case , exist_ok=__snake_case )
for expert in range(__snake_case ):
lowerCamelCase_ =switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(__snake_case ):
lowerCamelCase_ =torch.load(__snake_case )['''model''']
remove_ignore_keys_(__snake_case )
lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case )
lowerCamelCase_ =os.path.join(
__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) )
torch.save(__snake_case , __snake_case )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__snake_case )[0]].dtype )
# Add the last block
lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) )
lowerCamelCase_ =torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
remove_ignore_keys_(__snake_case )
lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case )
lowerCamelCase_ =shared_weights['''decoder.embed_tokens.weight''']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__snake_case ) == 1:
lowerCamelCase_ =os.path.join(__snake_case , __snake_case )
torch.save(__snake_case , __snake_case )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__snake_case , __snake_case )
# Otherwise, let's build the index
lowerCamelCase_ ={}
for idx, shard in enumerate(__snake_case ):
lowerCamelCase_ =weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-{len(__snake_case ):05d}.bin''' )
lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) )
for key in shard:
lowerCamelCase_ =shard_file
# Add the metadata
lowerCamelCase_ ={'''total_size''': total_size}
lowerCamelCase_ ={'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(__snake_case , __snake_case ) , '''w''' , encoding='''utf-8''' ) as f:
lowerCamelCase_ =json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + '''\n'''
f.write(__snake_case )
return metadata, index
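# The index built above follows the standard sharded-checkpoint layout: a dict of the
# form {"metadata": {"total_size": ...}, "weight_map": {param_name: shard_file}},
# written alongside the renamed `-000XX-of-000YY.bin` shard files.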
if __name__ == "__main__":
a_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
a_ : Tuple = parser.parse_args()
a_ , a_ : int = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_28,
args.dtype,
)
a_ : Tuple = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
)
config.save_pretrained(args.pytorch_dump_folder_path)
a_ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
| 6 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ : int = logging.get_logger(__name__)
a_ : Optional[Any] = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Dict ='table-transformer'
lowercase : Optional[Any] =['past_key_values']
lowercase : Union[str, Any] ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self, lowerCAmelCase=True, lowerCAmelCase=None, lowerCAmelCase=3, lowerCAmelCase=100, lowerCAmelCase=6, lowerCAmelCase=2_048, lowerCAmelCase=8, lowerCAmelCase=6, lowerCAmelCase=2_048, lowerCAmelCase=8, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=True, lowerCAmelCase="relu", lowerCAmelCase=256, lowerCAmelCase=0.1, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=1.0, lowerCAmelCase=False, lowerCAmelCase="sine", lowerCAmelCase="resnet50", lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=1, lowerCAmelCase=5, lowerCAmelCase=2, lowerCAmelCase=1, lowerCAmelCase=1, lowerCAmelCase=5, lowerCAmelCase=2, lowerCAmelCase=0.1, **lowerCAmelCase, ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowerCamelCase_ =CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =backbone_config.get('''model_type''' )
lowerCamelCase_ =CONFIG_MAPPING[backbone_model_type]
lowerCamelCase_ =config_class.from_dict(lowerCAmelCase )
# set timm attributes to None
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =None, None, None
lowerCamelCase_ =use_timm_backbone
lowerCamelCase_ =backbone_config
lowerCamelCase_ =num_channels
lowerCamelCase_ =num_queries
lowerCamelCase_ =d_model
lowerCamelCase_ =encoder_ffn_dim
lowerCamelCase_ =encoder_layers
lowerCamelCase_ =encoder_attention_heads
lowerCamelCase_ =decoder_ffn_dim
lowerCamelCase_ =decoder_layers
lowerCamelCase_ =decoder_attention_heads
lowerCamelCase_ =dropout
lowerCamelCase_ =attention_dropout
lowerCamelCase_ =activation_dropout
lowerCamelCase_ =activation_function
lowerCamelCase_ =init_std
lowerCamelCase_ =init_xavier_std
lowerCamelCase_ =encoder_layerdrop
lowerCamelCase_ =decoder_layerdrop
lowerCamelCase_ =encoder_layers
lowerCamelCase_ =auxiliary_loss
lowerCamelCase_ =position_embedding_type
lowerCamelCase_ =backbone
lowerCamelCase_ =use_pretrained_backbone
lowerCamelCase_ =dilation
# Hungarian matcher
lowerCamelCase_ =class_cost
lowerCamelCase_ =bbox_cost
lowerCamelCase_ =giou_cost
# Loss coefficients
lowerCamelCase_ =mask_loss_coefficient
lowerCamelCase_ =dice_loss_coefficient
lowerCamelCase_ =bbox_loss_coefficient
lowerCamelCase_ =giou_loss_coefficient
lowerCamelCase_ =eos_coefficient
super().__init__(is_encoder_decoder=lowerCAmelCase, **lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.d_model
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[int] =version.parse('1.11' )
@property
def lowercase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def lowercase__ ( self ):
"""simple docstring"""
return 1e-5
@property
def lowercase__ ( self ):
"""simple docstring"""
return 12
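# Taken together, the ONNX hooks above declare dynamic batch/channel/spatial axes for
# pixel_values (plus a dynamic batch axis for pixel_mask), a validation tolerance of
# 1e-5, and a default opset of 12.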
| 6 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : int =['image_processor', 'tokenizer']
lowercase : int ='LayoutLMv2ImageProcessor'
lowercase : Any =('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', lowerCAmelCase, )
lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase, lowerCAmelCase )
def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = True, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=lowerCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =[text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCamelCase_ =features['''words''']
lowerCamelCase_ =self.tokenizer(
text=text if text is not None else features['''words'''], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['''boxes'''], word_labels=lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, stride=lowerCAmelCase, pad_to_multiple_of=lowerCAmelCase, return_token_type_ids=lowerCAmelCase, return_attention_mask=lowerCAmelCase, return_overflowing_tokens=lowerCAmelCase, return_special_tokens_mask=lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, return_length=lowerCAmelCase, verbose=lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase, )
# add pixel values
lowerCamelCase_ =features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
lowerCamelCase_ =self.get_overflowing_images(lowerCAmelCase, encoded_inputs['''overflow_to_sample_mapping'''] )
lowerCamelCase_ =images
return encoded_inputs
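    # Sketch of a typical call (argument roles inferred from the signature above):
    # passing images plus optional text/boxes/word_labels first runs the image
    # processor (OCR when apply_ocr is enabled), then the tokenizer, and returns a
    # BatchEncoding whose "image" key carries the pixel values popped from the
    # image-processor output.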
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[]
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(lowerCAmelCase )} and {len(lowerCAmelCase )}''' )
return images_with_overflow
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, )
return self.image_processor_class
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, )
return self.image_processor
| 6 | 1 |
'''simple docstring'''
class __UpperCamelCase :
def __init__( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =name
lowerCamelCase_ =val
def __str__( self ):
"""simple docstring"""
return f'''{self.__class__.__name__}({self.name}, {self.val})'''
def __lt__( self, lowerCAmelCase ):
"""simple docstring"""
return self.val < other.val
class __UpperCamelCase :
def __init__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ ={}
lowerCamelCase_ ={}
lowerCamelCase_ =self.build_heap(lowerCAmelCase )
def __getitem__( self, lowerCAmelCase ):
"""simple docstring"""
return self.get_value(lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return (idx - 1) // 2
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return idx * 2 + 1
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return idx * 2 + 2
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return self.heap_dict[key]
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =len(lowerCAmelCase ) - 1
lowerCamelCase_ =self.get_parent_idx(lowerCAmelCase )
for idx, i in enumerate(lowerCAmelCase ):
lowerCamelCase_ =idx
lowerCamelCase_ =i.val
for i in range(lowerCAmelCase, -1, -1 ):
self.sift_down(lowerCAmelCase, lowerCAmelCase )
return array
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
while True:
lowerCamelCase_ =self.get_left_child_idx(lowerCAmelCase ) # noqa: E741
lowerCamelCase_ =self.get_right_child_idx(lowerCAmelCase )
lowerCamelCase_ =idx
if l < len(lowerCAmelCase ) and array[l] < array[idx]:
lowerCamelCase_ =l
if r < len(lowerCAmelCase ) and array[r] < array[smallest]:
lowerCamelCase_ =r
if smallest != idx:
lowerCamelCase_, lowerCamelCase_ =array[smallest], array[idx]
                lowerCamelCase_, lowerCamelCase_ =(
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
lowerCamelCase_ =smallest
else:
break
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.get_parent_idx(lowerCAmelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
lowerCamelCase_, lowerCamelCase_ =self.heap[idx], self.heap[p]
lowerCamelCase_, lowerCamelCase_ =(
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
lowerCamelCase_ =p
lowerCamelCase_ =self.get_parent_idx(lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
return self.heap[0]
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =self.heap[-1], self.heap[0]
lowerCamelCase_, lowerCamelCase_ =(
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
lowerCamelCase_ =self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0, self.heap )
return x
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
self.heap.append(lowerCAmelCase )
lowerCamelCase_ =len(self.heap ) - 1
lowerCamelCase_ =node.val
self.sift_up(len(self.heap ) - 1 )
def lowercase__ ( self ):
"""simple docstring"""
return len(self.heap ) == 0
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
lowerCamelCase_ =new_value
lowerCamelCase_ =new_value
self.sift_up(self.idx_of_element[node] )
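    # Note: decrease_key runs in O(log n) -- it overwrites the stored value and sifts
    # the node up, using idx_of_element for an O(1) lookup of the node's current heap
    # position.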
a_ : Optional[int] = Node("""R""", -1)
a_ : List[str] = Node("""B""", 6)
a_ : Optional[int] = Node("""A""", 3)
a_ : str = Node("""X""", 1)
a_ : Any = Node("""E""", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
a_ : str = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : Union[str, Any] =VQModel
lowercase : Union[str, Any] ='sample'
@property
def lowercase__ ( self, lowerCAmelCase=(32, 32) ):
"""simple docstring"""
lowerCamelCase_ =4
lowerCamelCase_ =3
lowerCamelCase_ =floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase )
return {"sample": image}
@property
def lowercase__ ( self ):
"""simple docstring"""
return (3, 32, 32)
@property
def lowercase__ ( self ):
"""simple docstring"""
return (3, 32, 32)
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ={
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 3,
}
lowerCamelCase_ =self.dummy_input
return init_dict, inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''', output_loading_info=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ), 0 )
model.to(lowerCAmelCase )
lowerCamelCase_ =model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''' )
model.to(lowerCAmelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
lowerCamelCase_ =torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size )
lowerCamelCase_ =image.to(lowerCAmelCase )
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase ).sample
lowerCamelCase_ =output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowerCamelCase_ =torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3] )
# fmt: on
self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) )
| 6 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def a_ ( __snake_case : List[str] , __snake_case : Optional[int]=0.9_9_9 , __snake_case : Dict="cosine" , ) -> Tuple:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__snake_case : Tuple ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__snake_case : Union[str, Any] ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
lowerCamelCase_ =[]
for i in range(_UpperCAmelCase ):
lowerCamelCase_ =i / num_diffusion_timesteps
lowerCamelCase_ =(i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_UpperCAmelCase ) / alpha_bar_fn(_UpperCAmelCase ) , _UpperCAmelCase ) )
return torch.tensor(_UpperCAmelCase , dtype=torch.floataa )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase : Optional[int] =[e.name for e in KarrasDiffusionSchedulers]
lowercase : List[str] =2
@register_to_config
def __init__( self, lowerCAmelCase = 1_000, lowerCAmelCase = 0.0_0_0_8_5, lowerCAmelCase = 0.0_1_2, lowerCAmelCase = "linear", lowerCAmelCase = None, lowerCAmelCase = "epsilon", lowerCAmelCase = "linspace", lowerCAmelCase = 0, ):
"""simple docstring"""
if trained_betas is not None:
lowerCamelCase_ =torch.tensor(__a, dtype=torch.floataa )
elif beta_schedule == "linear":
lowerCamelCase_ =torch.linspace(__a, __a, __a, dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCamelCase_ =(
torch.linspace(beta_start**0.5, beta_end**0.5, __a, dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCamelCase_ =betas_for_alpha_bar(__a )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
lowerCamelCase_ =1.0 - self.betas
lowerCamelCase_ =torch.cumprod(self.alphas, dim=0 )
# set all values
self.set_timesteps(__a, __a, __a )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
"""simple docstring"""
if schedule_timesteps is None:
lowerCamelCase_ =self.timesteps
lowerCamelCase_ =(schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowerCamelCase_ =1 if len(__a ) > 1 else 0
else:
lowerCamelCase_ =timestep.cpu().item() if torch.is_tensor(__a ) else timestep
lowerCamelCase_ =self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowercase__ ( self ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.index_for_timestep(__a )
if self.state_in_first_order:
lowerCamelCase_ =self.sigmas[step_index]
else:
lowerCamelCase_ =self.sigmas_interpol[step_index]
lowerCamelCase_ =sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, ):
"""simple docstring"""
lowerCamelCase_ =num_inference_steps
lowerCamelCase_ =num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowerCamelCase_ =np.linspace(0, num_train_timesteps - 1, __a, dtype=__a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowerCamelCase_ =num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCamelCase_ =(np.arange(0, __a ) * step_ratio).round()[::-1].copy().astype(__a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowerCamelCase_ =num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCamelCase_ =(np.arange(__a, 0, -step_ratio )).round().copy().astype(__a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
lowerCamelCase_ =np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowerCamelCase_ =torch.from_numpy(np.log(__a ) ).to(__a )
lowerCamelCase_ =np.interp(__a, np.arange(0, len(__a ) ), __a )
lowerCamelCase_ =np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
lowerCamelCase_ =torch.from_numpy(__a ).to(device=__a )
# interpolate sigmas
lowerCamelCase_ =sigmas.log().lerp(sigmas.roll(1 ).log(), 0.5 ).exp()
lowerCamelCase_ =torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
lowerCamelCase_ =torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(__a ).startswith('''mps''' ):
# mps does not support float64
lowerCamelCase_ =torch.from_numpy(__a ).to(__a, dtype=torch.floataa )
else:
lowerCamelCase_ =torch.from_numpy(__a ).to(__a )
# interpolate timesteps
lowerCamelCase_ =self.sigma_to_t(__a ).to(__a, dtype=timesteps.dtype )
lowerCamelCase_ =torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1 ).flatten()
lowerCamelCase_ =torch.cat([timesteps[:1], interleaved_timesteps] )
lowerCamelCase_ =None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowerCamelCase_ =defaultdict(__a )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =sigma.log()
# get distribution
lowerCamelCase_ =log_sigma - self.log_sigmas[:, None]
# get sigmas range
lowerCamelCase_ =dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
lowerCamelCase_ =low_idx + 1
lowerCamelCase_ =self.log_sigmas[low_idx]
lowerCamelCase_ =self.log_sigmas[high_idx]
# interpolate sigmas
lowerCamelCase_ =(low - log_sigma) / (low - high)
lowerCamelCase_ =w.clamp(0, 1 )
# transform interpolation to time range
lowerCamelCase_ =(1 - w) * low_idx + w * high_idx
lowerCamelCase_ =t.view(sigma.shape )
return t
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.sample is None
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = True, ):
"""simple docstring"""
lowerCamelCase_ =self.index_for_timestep(__a )
# advance index counter by 1
lowerCamelCase_ =timestep.cpu().item() if torch.is_tensor(__a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowerCamelCase_ =self.sigmas[step_index]
lowerCamelCase_ =self.sigmas_interpol[step_index + 1]
lowerCamelCase_ =self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
lowerCamelCase_ =self.sigmas[step_index - 1]
lowerCamelCase_ =self.sigmas_interpol[step_index]
lowerCamelCase_ =self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyway.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowerCamelCase_ =0
lowerCamelCase_ =sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowerCamelCase_ =sigma_hat if self.state_in_first_order else sigma_interpol
lowerCamelCase_ =sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowerCamelCase_ =sigma_hat if self.state_in_first_order else sigma_interpol
lowerCamelCase_ =model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowerCamelCase_ =(sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowerCamelCase_ =sigma_interpol - sigma_hat
# store for 2nd order step
lowerCamelCase_ =sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
lowerCamelCase_ =(sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
lowerCamelCase_ =sigma_next - sigma_hat
lowerCamelCase_ =self.sample
lowerCamelCase_ =None
lowerCamelCase_ =sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__a ):
# mps does not support float64
lowerCamelCase_ =self.timesteps.to(original_samples.device, dtype=torch.floataa )
lowerCamelCase_ =timesteps.to(original_samples.device, dtype=torch.floataa )
else:
lowerCamelCase_ =self.timesteps.to(original_samples.device )
lowerCamelCase_ =timesteps.to(original_samples.device )
lowerCamelCase_ =[self.index_for_timestep(__a, __a ) for t in timesteps]
lowerCamelCase_ =sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowerCamelCase_ =sigma.unsqueeze(-1 )
lowerCamelCase_ =original_samples + noise * sigma
return noisy_samples
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
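# Illustrative check (added; assumes only the `torch` import above, and the
# hypothetical `_demo_*` names are not part of the record): the "interpolate
# sigmas" step mid-points adjacent sigmas in log-space, i.e. it takes the
# geometric mean of neighbouring sigmas.
_demo_sigmas = torch.tensor([14.6, 7.0, 3.0, 1.0, 0.1])
_demo_interpol = _demo_sigmas.log().lerp(_demo_sigmas.roll(1).log(), 0.5).exp()
# entry 0 pairs sigmas[0] with sigmas[-1] via roll(1) and is discarded upstream
assert torch.allclose(_demo_interpol[1:], (_demo_sigmas[1:] * _demo_sigmas[:-1]).sqrt())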
| 350 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
a_ : List[Any] = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
a_ : List[Any] = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
a_ : Any = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def lowercase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ), codebase_urls=['''https://www.atticusprojectai.org/cuad'''], reference_urls=['''https://www.atticusprojectai.org/cuad'''], )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ ={prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
lowerCamelCase_ =[
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
lowerCamelCase_ =evaluate(dataset=lowerCAmelCase, predictions=lowerCAmelCase )
return score
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ : Optional[int] = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Union[str, Any] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
a_ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
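# Added sketch: how a lazy module like `_LazyModule` above behaves. At import
# time only the names in `_import_structure` are registered; the heavy
# submodule is imported on first attribute access. A minimal stand-alone
# analogue (hypothetical class name; nothing beyond the stdlib is assumed):
#
#   import importlib
#   import types
#
#   class _MiniLazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#
#       def __getattr__(self, attr):
#           for submodule, names in self._import_structure.items():
#               if attr in names:
#                   module = importlib.import_module(f"{self.__name__}.{submodule}")
#                   return getattr(module, attr)
#           raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")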
| 351 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
a_ : Tuple = logging.get_logger(__name__)
a_ : int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a_ : Tuple = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
a_ : Union[str, Any] = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
a_ : str = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
a_ : int = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_12,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_12,
}
a_ : List[Any] = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_12,
"""facebook/dpr-question_encoder-multiset-base""": 5_12,
}
a_ : Optional[Any] = {
"""facebook/dpr-reader-single-nq-base""": 5_12,
"""facebook/dpr-reader-multiset-base""": 5_12,
}
a_ : Optional[int] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
a_ : List[str] = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
a_ : Dict = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[Any] =VOCAB_FILES_NAMES
lowercase : Any =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[Any] =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase : Dict =DPRContextEncoderTokenizer
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[int] =VOCAB_FILES_NAMES
lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase : List[Any] =DPRQuestionEncoderTokenizer
a_ : Union[str, Any] = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
a_ : Dict = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
a_ : Dict = R"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) into a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(lowerCamelCase__ )
class __UpperCamelCase :
def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
elif titles is None or texts is None:
lowerCamelCase_ =titles if texts is None else texts
return super().__call__(
lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
lowerCamelCase_ =titles if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [titles]
lowerCamelCase_ =texts if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [texts]
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =questions if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [questions] * n_passages
assert len(lowerCAmelCase ) == len(
            lowerCAmelCase ), f'''There should be as many titles as texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.'''
lowerCamelCase_ =super().__call__(lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
lowerCamelCase_ =super().__call__(lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
lowerCamelCase_ ={
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCAmelCase, lowerCAmelCase )
]
}
if return_attention_mask is not False:
lowerCamelCase_ =[]
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCamelCase_ =attention_mask
return self.pad(lowerCAmelCase, padding=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 16, lowerCAmelCase = 64, lowerCAmelCase = 4, ):
"""simple docstring"""
lowerCamelCase_ =reader_input['''input_ids''']
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =reader_output[:3]
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =sorted(range(lowerCAmelCase ), reverse=lowerCAmelCase, key=relevance_logits.__getitem__ )
lowerCamelCase_ =[]
for doc_id in sorted_docs:
lowerCamelCase_ =list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCamelCase_ =sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCamelCase_ =sequence_ids.index(self.pad_token_id )
else:
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCAmelCase, top_spans=lowerCAmelCase, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCAmelCase, start_index=lowerCAmelCase, end_index=lowerCAmelCase, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) )
if len(lowerCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =[]
for start_index, start_score in enumerate(lowerCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowerCamelCase_ =sorted(lowerCAmelCase, key=lambda lowerCAmelCase : x[1], reverse=lowerCAmelCase )
lowerCamelCase_ =[]
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
lowerCamelCase_ =end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
lowercase : int =VOCAB_FILES_NAMES
lowercase : Tuple =READER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Tuple =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] =READER_PRETRAINED_INIT_CONFIGURATION
lowercase : int =['input_ids', 'attention_mask']
lowercase : Dict =DPRReaderTokenizer
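# Added usage sketch (comments only; the renamed stubs above collapse the
# method names, so this assumes the upstream DPRReaderTokenizerFast API):
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions="Who wrote Hamlet?",
#       titles=["Hamlet"],
#       texts=["Hamlet is a tragedy written by William Shakespeare ..."],
#       return_tensors="pt",
#   )
#   # encoded["input_ids"] has shape (n_passages, sequence_length) laid out as
#   # [CLS] <question ids> [SEP] <title ids> [SEP] <text ids>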
| 6 | 0 |
'''simple docstring'''
import operator as op
a_ : List[str] = """scaler.pt"""
a_ : Union[str, Any] = """pytorch_model"""
a_ : int = """random_states"""
a_ : str = """optimizer"""
a_ : Tuple = """scheduler"""
a_ : Dict = """pytorch_model.bin"""
a_ : Optional[int] = """pytorch_model.bin.index.json"""
a_ : int = """model.safetensors"""
a_ : str = """model.safetensors.index.json"""
a_ : List[Any] = """1.10.2"""
a_ : int = """py38"""
a_ : Optional[int] = """4.17.0"""
a_ : Any = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
a_ : Any = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
a_ : Optional[Any] = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
a_ : Any = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
a_ : Union[str, Any] = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
a_ : int = """2.0.1"""
a_ : int = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
a_ : Optional[Any] = ["""default""", """reduce-overhead""", """max-autotune"""]
a_ : Optional[Any] = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
a_ : Optional[int] = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
a_ : Optional[Any] = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
a_ : Optional[Any] = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 352 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def a_ ( ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ ={
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
}
lowerCamelCase_ =Dataset.from_dict(__snake_case )
return dataset
class __UpperCamelCase ( lowerCamelCase__ ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =get_dataset()
lowerCamelCase_ =make_duplicate_clusters(lowerCAmelCase, 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ), 2 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =get_dataset()
lowerCamelCase_, lowerCamelCase_ =deduplicate_dataset(lowerCAmelCase )
self.assertEqual(len(lowerCAmelCase ), 2 )
print(lowerCAmelCase )
self.assertEqual(duplicate_clusters[0][0]['''copies'''], 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''], lowerCAmelCase )
| 6 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=A__ ):
lowercase : str = ['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=A__ ):
lowercase : Tuple = ['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=A__ ):
lowercase : str = ['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=A__ ):
lowercase : int = ['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=A__ ):
lowercase : Any = ['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=A__ ):
lowercase : List[str] = ['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=A__ ):
lowercase : Dict = ['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=A__ ):
lowercase : List[Any] = ['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=A__ ):
lowercase : Optional[Any] = ['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=A__ ):
lowercase : Any = ['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=A__ ):
lowercase : List[str] = ['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=A__ ):
lowercase : int = ['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=A__ ):
lowercase : List[str] = ['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
a_ : Any = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 6 | 0 |
'''simple docstring'''
def a_ ( __snake_case : Any ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =len(lowerCamelCase__ )
while cur > 1:
# Find the maximum number in arr
lowerCamelCase_ =arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
lowerCamelCase_ =arr[mi::-1] + arr[mi + 1 : len(lowerCamelCase__ )]
# Reverse whole list
lowerCamelCase_ =arr[cur - 1 :: -1] + arr[cur : len(lowerCamelCase__ )]
cur -= 1
return arr
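# Added worked example (the renamed function above is `a_`): for [3, 1, 2]
# the first pass finds the max at index 0, flips arr[:1] (a no-op), then
# flips the first 3 elements -> [2, 1, 3]; the second pass flips the first
# two elements of [2, 1, 3] -> [1, 2, 3]. Each pass moves the current
# maximum to position cur - 1, shrinking the unsorted prefix by one.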
if __name__ == "__main__":
a_ : List[str] = input("""Enter numbers separated by a comma:\n""").strip()
a_ : int = [int(item) for item in user_input.split(""",""")]
print(pancake_sort(unsorted))
| 354 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def a_ ( __snake_case : int = 150_0000 ) -> int:
"""simple docstring"""
lowerCamelCase_ =defaultdict(__snake_case )
lowerCamelCase_ =2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , __snake_case , 2 ):
if gcd(__snake_case , __snake_case ) > 1:
continue
lowerCamelCase_ =2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__snake_case , limit + 1 , __snake_case ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
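# Added note: the generator above is Euclid's formula. Coprime m > n of
# opposite parity yield every primitive Pythagorean triple
#   a = m^2 - n^2, b = 2mn, c = m^2 + n^2, with perimeter a + b + c = 2m(m + n),
# and scaling by k reaches every perimeter 2km(m + n) <= limit. The function
# counts perimeters produced by exactly one triple (cf. Project Euler problem 75).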
if __name__ == "__main__":
print(F"""{solution() = }""")
| 6 | 0 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
a_ : Dict = None
try:
import msvcrt
except ImportError:
a_ : Optional[Any] = None
try:
import fcntl
except ImportError:
a_ : Optional[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
a_ : List[str] = OSError
# Data
# ------------------------------------------------
a_ : Optional[int] = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
a_ : Dict = """3.0.12"""
a_ : Dict = None
def a_ ( ) -> Tuple:
"""simple docstring"""
global _logger
lowerCamelCase_ =_logger or logging.getLogger(__name__ )
return _logger
class __UpperCamelCase ( lowercase__ ):
def __init__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =lock_file
return None
def __str__( self ):
"""simple docstring"""
lowerCamelCase_ =f'''The file lock \'{self.lock_file}\' could not be acquired.'''
return temp
class __UpperCamelCase :
def __init__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =lock
return None
def __enter__( self ):
"""simple docstring"""
return self.lock
def __exit__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
self.lock.release()
return None
class __UpperCamelCase :
def __init__( self, lowerCAmelCase, lowerCAmelCase=-1, lowerCAmelCase=None ):
"""simple docstring"""
lowerCamelCase_ =max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
lowerCamelCase_ =self.hash_filename_if_too_long(_a, _a )
# The path to the lock file.
lowerCamelCase_ =lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
lowerCamelCase_ =None
# The default timeout value.
lowerCamelCase_ =timeout
# We use this lock primarily for the lock counter.
lowerCamelCase_ =threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
lowerCamelCase_ =0
return None
@property
def lowercase__ ( self ):
"""simple docstring"""
return self._lock_file
@property
def lowercase__ ( self ):
"""simple docstring"""
return self._timeout
@timeout.setter
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =float(_a )
return None
def lowercase__ ( self ):
"""simple docstring"""
raise NotImplementedError()
def lowercase__ ( self ):
"""simple docstring"""
raise NotImplementedError()
@property
def lowercase__ ( self ):
"""simple docstring"""
return self._lock_file_fd is not None
def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=0.0_5 ):
"""simple docstring"""
if timeout is None:
lowerCamelCase_ =self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
lowerCamelCase_ =id(self )
lowerCamelCase_ =self._lock_file
lowerCamelCase_ =time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'''Attempting to acquire lock {lock_id} on {lock_filename}''' )
self._acquire()
if self.is_locked:
logger().debug(f'''Lock {lock_id} acquired on {lock_filename}''' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'''Timeout on acquiring lock {lock_id} on {lock_filename}''' )
raise Timeout(self._lock_file )
else:
logger().debug(
f'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' )
time.sleep(_a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
lowerCamelCase_ =max(0, self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def lowercase__ ( self, lowerCAmelCase=False ):
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
lowerCamelCase_ =id(self )
lowerCamelCase_ =self._lock_file
logger().debug(f'''Attempting to release lock {lock_id} on {lock_filename}''' )
self._release()
lowerCamelCase_ =0
logger().debug(f'''Lock {lock_id} released on {lock_filename}''' )
return None
def __enter__( self ):
"""simple docstring"""
self.acquire()
return self
def __exit__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
self.release()
return None
def __del__( self ):
"""simple docstring"""
self.release(force=_a )
return None
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =os.path.basename(_a )
if len(_a ) > max_length and max_length > 0:
lowerCamelCase_ =os.path.dirname(_a )
lowerCamelCase_ =str(hash(_a ) )
lowerCamelCase_ =filename[: max_length - len(_a ) - 8] + '...' + hashed_filename + '.lock'
return os.path.join(_a, _a )
else:
return path
class __UpperCamelCase ( lowercase__ ):
def __init__( self, lowerCAmelCase, lowerCAmelCase=-1, lowerCAmelCase=None ):
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(_a, timeout=_a, max_filename_length=_a )
lowerCamelCase_ ='\\\\?\\' + relative_to_absolute_path(self.lock_file )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
lowerCamelCase_ =os.open(self._lock_file, _a )
except OSError:
pass
else:
try:
msvcrt.locking(_a, msvcrt.LK_NBLCK, 1 )
except OSError:
os.close(_a )
else:
lowerCamelCase_ =fd
return None
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self._lock_file_fd
lowerCamelCase_ =None
msvcrt.locking(_a, msvcrt.LK_UNLCK, 1 )
os.close(_a )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class __UpperCamelCase ( lowercase__ ):
def __init__( self, lowerCAmelCase, lowerCAmelCase=-1, lowerCAmelCase=None ):
"""simple docstring"""
lowerCamelCase_ =os.statvfs(os.path.dirname(_a ) ).f_namemax
super().__init__(_a, timeout=_a, max_filename_length=_a )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =os.O_RDWR | os.O_CREAT | os.O_TRUNC
lowerCamelCase_ =os.open(self._lock_file, _a )
try:
fcntl.flock(_a, fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(_a )
else:
lowerCamelCase_ =fd
return None
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self._lock_file_fd
lowerCamelCase_ =None
fcntl.flock(_a, fcntl.LOCK_UN )
os.close(_a )
return None
class __UpperCamelCase ( lowercase__ ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
lowerCamelCase_ =os.open(self._lock_file, _a )
except OSError:
pass
else:
lowerCamelCase_ =fd
return None
def lowercase__ ( self ):
"""simple docstring"""
os.close(self._lock_file_fd )
lowerCamelCase_ =None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
a_ : List[str] = None
if msvcrt:
a_ : Union[str, Any] = WindowsFileLock
elif fcntl:
a_ : Dict = UnixFileLock
else:
a_ : Any = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
| 355 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ : Tuple = 16
a_ : Optional[int] = 32
def a_ ( __snake_case : Accelerator , __snake_case : int = 16 ) -> str:
"""simple docstring"""
lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowerCamelCase_ =load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__snake_case : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ =datasets.map(
__snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels', which is the name the
    # models of the transformers library expect for labels
lowerCamelCase_ =tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__snake_case : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ =16
elif accelerator.mixed_precision != "no":
lowerCamelCase_ =8
else:
lowerCamelCase_ =None
return tokenizer.pad(
__snake_case , padding='''longest''' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowerCamelCase_ =DataLoader(
tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
lowerCamelCase_ =DataLoader(
tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a_ : Tuple = mocked_dataloaders # noqa: F811
def a_ ( __snake_case : List[str] , __snake_case : Tuple ) -> Optional[Any]:
"""simple docstring"""
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __snake_case ) == "1":
lowerCamelCase_ =2
# Initialize accelerator
lowerCamelCase_ =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ =config['''lr''']
lowerCamelCase_ =int(config['''num_epochs'''] )
lowerCamelCase_ =int(config['''seed'''] )
lowerCamelCase_ =int(config['''batch_size'''] )
lowerCamelCase_ =evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__snake_case )
def inner_training_loop(__snake_case : Union[str, Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ =model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ =AdamW(params=model.parameters() , lr=__snake_case )
lowerCamelCase_, lowerCamelCase_ =get_dataloaders(__snake_case , __snake_case )
# Instantiate scheduler
lowerCamelCase_ =get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Now we train the model
for epoch in range(__snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_ =model(**__snake_case )
lowerCamelCase_ =outputs.loss
accelerator.backward(__snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ =model(**__snake_case )
lowerCamelCase_ =outputs.logits.argmax(dim=-1 )
lowerCamelCase_, lowerCamelCase_ =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
lowerCamelCase_ =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __snake_case )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def a_ ( ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=__snake_case , default=__snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowerCamelCase_ =parser.parse_args()
lowerCamelCase_ ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
| 6 | 0 |
'''simple docstring'''
from torch import nn
class __UpperCamelCase ( nn.Module ):
def __init__( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
super().__init__()
lowerCamelCase_ =class_size
lowerCamelCase_ =embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
lowerCamelCase_ =nn.Linear(_snake_case, _snake_case )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.mlp(_snake_case )
return logits
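# Illustrative usage sketch (not part of the row above; assumes the placeholder
# assignments resolve to self.mlp = nn.Linear(embed_size, class_size) as in the
# un-obfuscated source, with hypothetical sizes):
#
#     import torch
#     head = __UpperCamelCase(2, 768)      # class_size=2, embed_size=768
#     hidden = torch.randn(4, 768)         # a batch of 4 pooled embeddings
#     logits = head.lowercase__(hidden)    # -> tensor of shape (4, 2)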
| 356 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
a_ : List[str] = """src/diffusers"""
# Matches is_xxx_available()
a_ : int = re.compile(R"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
a_ : List[str] = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
a_ : Optional[Any] = """
{0} = None
"""
a_ : List[Any] = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
a_ : Optional[Any] = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def a_ ( __snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =_re_backend.findall(__snake_case )
if len(__snake_case ) == 0:
return None
return "_and_".join(__snake_case )
def a_ ( ) -> Optional[int]:
"""simple docstring"""
with open(os.path.join(__snake_case , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCamelCase_ =f.readlines()
# Get to the point we do the actual imports for type checking
lowerCamelCase_ =0
lowerCamelCase_ ={}
# Go through the end of the file
while line_index < len(__snake_case ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowerCamelCase_ =find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('''else:''' ):
line_index += 1
line_index += 1
lowerCamelCase_ =[]
# Until we unindent, add backend objects to the list
while line_index < len(__snake_case ) and len(lines[line_index] ) > 1:
lowerCamelCase_ =lines[line_index]
lowerCamelCase_ =_re_single_line_import.search(__snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__snake_case ) > 0:
lowerCamelCase_ =objects
else:
line_index += 1
return backend_specific_objects
def a_ ( __snake_case : Dict , __snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(__snake_case )
elif name.islower():
return DUMMY_FUNCTION.format(__snake_case , __snake_case )
else:
return DUMMY_CLASS.format(__snake_case , __snake_case )
def a_ ( __snake_case : Tuple=None ) -> List[str]:
"""simple docstring"""
if backend_specific_objects is None:
lowerCamelCase_ =read_init()
    # For the special correspondence between a backend and the module name used in the requires_backends check
lowerCamelCase_ ={}
for backend, objects in backend_specific_objects.items():
lowerCamelCase_ ='''[''' + ''', '''.join(F'''"{b}"''' for b in backend.split('''_and_''' ) ) + ''']'''
lowerCamelCase_ ='''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__snake_case , __snake_case ) for o in objects] )
lowerCamelCase_ =dummy_file
return dummy_files
def a_ ( __snake_case : Dict=False ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =create_dummy_files()
    # For the special correspondence between a backend and the filename shortcut used in utils/dummy_xxx_objects.py
lowerCamelCase_ ={'''torch''': '''pt'''}
# Locate actual dummy modules and read their content.
lowerCamelCase_ =os.path.join(__snake_case , '''utils''' )
lowerCamelCase_ ={
backend: os.path.join(__snake_case , F'''dummy_{short_names.get(__snake_case , __snake_case )}_objects.py''' )
for backend in dummy_files.keys()
}
lowerCamelCase_ ={}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__snake_case ):
with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCamelCase_ =f.read()
else:
lowerCamelCase_ =''''''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py as the main '''
'''__init__ has new objects.''' )
with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'''The main __init__ has objects that are not present in '''
F'''diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py. Run `make fix-copies` '''
'''to fix this.''' )
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
a_ : Tuple = parser.parse_args()
check_dummies(args.fix_and_overwrite)
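# Illustrative output sketch: for a hypothetical class "PNDMPipeline" gated behind
# the "torch" backend, the DUMMY_CLASS template above would render (abridged):
#
#     class PNDMPipeline(metaclass=DummyObject):
#         _backends = ["torch"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])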
| 6 | 0 |
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
a_ : Tuple = True
from torch.cuda.amp import autocast
a_ : str = logging.getLogger(__name__)
@dataclass
class __UpperCamelCase :
lowercase : str =field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowercase : Optional[str] =field(
default=lowercase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowercase : Optional[bool] =field(
default=lowercase__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
lowercase : Optional[bool] =field(
default=lowercase__ , metadata={'help': 'Whether to log verbose messages or not.'} , )
lowercase : Optional[float] =field(
default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'} )
lowercase : Optional[float] =field(
default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'} )
lowercase : Optional[float] =field(
default=0.99_99_95 , metadata={'help': 'Decay of gumbel temperature during training.'} )
def a_ ( __snake_case : ModelArguments , __snake_case : TrainingArguments ) -> int:
"""simple docstring"""
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
lowerCamelCase_ =logging.WARNING
if model_args.verbose_logging:
lowerCamelCase_ =logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
lowerCamelCase_ =logging.INFO
logger.setLevel(lowerCamelCase_ )
@dataclass
class __UpperCamelCase :
lowercase : str =field(
default=lowercase__ , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
lowercase : Optional[str] =field(
default=lowercase__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowercase : Optional[str] =field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowercase : Optional[str] =field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
lowercase : Optional[str] =field(
default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
lowercase : bool =field(
default=lowercase__ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
lowercase : Optional[int] =field(
default=1 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
lowercase : Optional[int] =field(
default=lowercase__ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowercase : Optional[float] =field(
default=20.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'} )
@dataclass
class __UpperCamelCase :
lowercase : WavaVecaForPreTraining
lowercase : WavaVecaFeatureExtractor
lowercase : Union[bool, str] ="longest"
lowercase : Optional[int] =None
lowercase : Optional[int] =None
def __call__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.feature_extractor.pad(
_UpperCamelCase, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='''pt''', )
lowerCamelCase_ =self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] )
lowerCamelCase_ =batch["""input_values"""].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
lowerCamelCase_ =self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to(
torch.long )
lowerCamelCase_ =torch.zeros(
(batch_size, mask_indices_seq_length), dtype=torch.long, device=batch['''input_values'''].device )
            # these two operations make sure that all values
            # before the output length indices are attended to
lowerCamelCase_ =1
lowerCamelCase_ =attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
        # randomly sample the masked time indices
lowerCamelCase_ =_compute_mask_indices(
(batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=_UpperCamelCase, min_masks=2, )
return batch
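# Illustrative sketch of the flip/cumsum/flip trick above: it expands a single 1
# placed at index (output_length - 1) into a prefix mask over all earlier positions:
#
#     import torch
#     m = torch.tensor([[0, 1, 0, 0]])              # marker at output_length - 1 = 1
#     m.flip([-1]).cumsum(-1).flip([-1]).bool()     # -> [[True, True, False, False]]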
class __UpperCamelCase ( lowercase__ ):
def __init__( self, *lowerCAmelCase, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=1.0, **lowerCAmelCase ):
"""simple docstring"""
super().__init__(*_UpperCamelCase, **_UpperCamelCase )
lowerCamelCase_ =0
lowerCamelCase_ =max_gumbel_temp
lowerCamelCase_ =min_gumbel_temp
lowerCamelCase_ =gumbel_temp_decay
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
model.train()
lowerCamelCase_ =self._prepare_inputs(_UpperCamelCase )
if self.use_amp:
with autocast():
lowerCamelCase_ =self.compute_loss(_UpperCamelCase, _UpperCamelCase )
else:
lowerCamelCase_ =self.compute_loss(_UpperCamelCase, _UpperCamelCase )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
lowerCamelCase_ =loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
lowerCamelCase_ =loss.sum() / (inputs["""mask_time_indices"""]).sum()
else:
raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
lowerCamelCase_ =loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(_UpperCamelCase ).backward()
elif self.use_apex:
with amp.scale_loss(_UpperCamelCase, self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(_UpperCamelCase )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp ) )
return loss.detach()
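# Illustrative sketch (not in the original script): the gumbel temperature decays
# exponentially per update step and is clamped at the floor, e.g. with
# max_gumbel_temp=2.0, min_gumbel_temp=0.5, gumbel_temp_decay=0.999995:
#
#     temps = [max(2.0 * 0.999995**step, 0.5) for step in (0, 100_000, 500_000)]
#     # -> roughly [2.0, 1.21, 0.5] as training progresses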
def a_ ( ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCamelCase_ =parser.parse_args_into_dataclasses()
configure_logger(lowerCamelCase_ , lowerCamelCase_ )
# Downloading and loading a dataset from the hub.
lowerCamelCase_ =load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
lowerCamelCase_ =DatasetDict()
lowerCamelCase_ =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
lowerCamelCase_ =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
lowerCamelCase_ =DatasetDict()
lowerCamelCase_ =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split='''validation''' , cache_dir=model_args.cache_dir , )
lowerCamelCase_ =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
lowerCamelCase_ =WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=lowerCamelCase_ )
def prepare_dataset(__snake_case : List[Any] ):
# check that all files have the correct sampling rate
lowerCamelCase_ =librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
lowerCamelCase_ =datasets.map(
lowerCamelCase_ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['''train'''].column_names )
# filter audio files that are too long
lowerCamelCase_ =vectorized_datasets.filter(
lambda __snake_case : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(__snake_case : Dict ):
return feature_extractor(batch['''speech'''] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
lowerCamelCase_ =vectorized_datasets.map(
lowerCamelCase_ , batched=lowerCamelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['''train'''].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
lowerCamelCase_ =WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'''
''' ``config.feat_extract_norm=\'layer\'''' )
lowerCamelCase_ =WavaVecaForPreTraining(lowerCamelCase_ )
lowerCamelCase_ =DataCollatorForWavaVecaPretraining(model=lowerCamelCase_ , feature_extractor=lowerCamelCase_ )
lowerCamelCase_ =WavaVecaPreTrainer(
model=lowerCamelCase_ , data_collator=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=vectorized_datasets['''train'''] , eval_dataset=vectorized_datasets['''validation'''] , tokenizer=lowerCamelCase_ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 357 |
'''simple docstring'''
a_ : List[Any] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def a_ ( __snake_case : int ) -> int:
"""simple docstring"""
lowerCamelCase_ =0
while number:
        # Increased speed slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# Only 2 chains exist:
# one ends at 89 (declaring chain member 58 first requires the fewest
# iterations for all remaining members to be checked),
# and the other ends at 1 and contains only the single element 1.
# So 58 and 1 are the values declared at the start.
# Changed dictionary to an array to quicken the solution
a_ : list[bool | None] = [None] * 10_00_00_00
a_ : List[Any] = True
a_ : Optional[Any] = False
def a_ ( __snake_case : int ) -> bool:
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowerCamelCase_ =chain(next_number(__snake_case ) )
lowerCamelCase_ =number_chain
while number < 1000_0000:
lowerCamelCase_ =number_chain
number *= 10
return number_chain
def a_ ( __snake_case : int = 1000_0000 ) -> int:
"""simple docstring"""
for i in range(1 , __snake_case ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 6 | 0 |
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def a_ ( __snake_case : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =checkpoints.load_tax_checkpoint(__snake_case )
lowerCamelCase_ =flatten_dict(__snake_case )
return flax_params
def a_ ( __snake_case : int ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ ={}
lowerCamelCase_ ={
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
lowerCamelCase_ ={
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
lowerCamelCase_ ='''.'''.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
lowerCamelCase_ =new_key.replace(__snake_case , __snake_case )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
lowerCamelCase_ =new_key.replace(__snake_case , __snake_case )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
lowerCamelCase_ =re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , __snake_case )
lowerCamelCase_ =new_key.replace('''encoder''' , '''encoder.encoder''' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
lowerCamelCase_ =re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , __snake_case )
lowerCamelCase_ =flax_dict[key]
lowerCamelCase_ ={}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
lowerCamelCase_ =torch.from_numpy(converted_dict[key].T )
else:
lowerCamelCase_ =torch.from_numpy(converted_dict[key] )
return converted_torch_dict
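# Illustrative note on the `.T` above: Flax stores dense kernels as
# (in_features, out_features) while torch.nn.Linear expects (out_features, in_features),
# hence the transpose for every non-embedding weight. A minimal sketch:
#
#     import numpy as np, torch
#     flax_kernel = np.ones((4, 8))                  # (in, out)
#     pt_weight = torch.from_numpy(flax_kernel.T)    # (out, in) = (8, 4)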
def a_ ( __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Any=False , __snake_case : Optional[Any]=False ) -> Any:
"""simple docstring"""
lowerCamelCase_ =get_flax_param(__snake_case )
if not use_large:
lowerCamelCase_ =PixaStructVisionConfig()
lowerCamelCase_ =PixaStructTextConfig()
else:
lowerCamelCase_ =PixaStructVisionConfig(
hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
lowerCamelCase_ =PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
lowerCamelCase_ =PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__snake_case )
lowerCamelCase_ =PixaStructForConditionalGeneration(__snake_case )
lowerCamelCase_ =rename_and_convert_flax_params(__snake_case )
model.load_state_dict(__snake_case )
lowerCamelCase_ =AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
lowerCamelCase_ =PixaStructImageProcessor()
lowerCamelCase_ =PixaStructProcessor(image_processor=__snake_case , tokenizer=__snake_case )
if use_large:
lowerCamelCase_ =4096
lowerCamelCase_ =True
# mkdir if needed
os.makedirs(__snake_case , exist_ok=__snake_case )
model.save_pretrained(__snake_case )
processor.save_pretrained(__snake_case )
print('''Model saved in {}'''.format(__snake_case ) )
if __name__ == "__main__":
a_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
a_ : Tuple = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 358 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def a_ ( __snake_case : Tuple ) -> str:
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class __UpperCamelCase ( lowerCamelCase__ ):
@staticmethod
def lowercase__ ( lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =parser.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''', type=lowerCAmelCase, default=lowerCAmelCase, help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''', action='''store_true''', help='''Force the model to be download even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''', action='''store_true''', help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''', )
download_parser.add_argument('''model''', type=lowerCAmelCase, help='''Name of the model to download''' )
download_parser.set_defaults(func=lowerCAmelCase )
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =model
lowerCamelCase_ =cache
lowerCamelCase_ =force
lowerCamelCase_ =trust_remote_code
def lowercase__ ( self ):
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
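# Illustrative usage (sketch): registered under the `transformers-cli` entry point,
# the command above is invoked roughly as
#
#     transformers-cli download bert-base-uncased --cache-dir ./models --force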
| 6 | 0 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCamelCase :
def __init__( self, lowerCAmelCase, lowerCAmelCase=2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase=2, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=99, lowerCAmelCase=36, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=16, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=6, lowerCAmelCase=6, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase=None, lowerCAmelCase=1_000, ):
"""simple docstring"""
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =num_channels
lowerCamelCase_ =image_size
lowerCamelCase_ =patch_size
lowerCamelCase_ =text_seq_length
lowerCamelCase_ =is_training
lowerCamelCase_ =use_input_mask
lowerCamelCase_ =use_token_type_ids
lowerCamelCase_ =use_labels
lowerCamelCase_ =vocab_size
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_act
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =type_sequence_label_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =coordinate_size
lowerCamelCase_ =shape_size
lowerCamelCase_ =num_labels
lowerCamelCase_ =num_choices
lowerCamelCase_ =scope
lowerCamelCase_ =range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCamelCase_ =text_seq_length
lowerCamelCase_ =(image_size // patch_size) ** 2 + 1
lowerCamelCase_ =self.text_seq_length + self.image_seq_length
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size )
lowerCamelCase_ =ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCamelCase_ =bbox[i, j, 3]
lowerCamelCase_ =bbox[i, j, 1]
lowerCamelCase_ =t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCamelCase_ =bbox[i, j, 2]
lowerCamelCase_ =bbox[i, j, 0]
lowerCamelCase_ =t
lowerCamelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ =None
if self.use_input_mask:
lowerCamelCase_ =random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCamelCase_ =None
if self.use_token_type_ids:
lowerCamelCase_ =ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size )
lowerCamelCase_ =None
lowerCamelCase_ =None
if self.use_labels:
lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase_ =ids_tensor([self.batch_size, self.text_seq_length], self.num_labels )
lowerCamelCase_ =LayoutLMvaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =LayoutLMvaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# text + image
lowerCamelCase_ =model(snake_case__, pixel_values=snake_case__ )
lowerCamelCase_ =model(
snake_case__, bbox=snake_case__, pixel_values=snake_case__, attention_mask=snake_case__, token_type_ids=snake_case__ )
lowerCamelCase_ =model(snake_case__, bbox=snake_case__, pixel_values=snake_case__, token_type_ids=snake_case__ )
lowerCamelCase_ =model(snake_case__, bbox=snake_case__, pixel_values=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCamelCase_ =model(snake_case__ )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCamelCase_ =model(pixel_values=snake_case__ )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =LayoutLMvaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCamelCase_ =model(
snake_case__, bbox=snake_case__, pixel_values=snake_case__, attention_mask=snake_case__, token_type_ids=snake_case__, labels=snake_case__, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =LayoutLMvaForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCamelCase_ =model(
snake_case__, bbox=snake_case__, pixel_values=snake_case__, attention_mask=snake_case__, token_type_ids=snake_case__, labels=snake_case__, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =LayoutLMvaForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCamelCase_ =model(
snake_case__, bbox=snake_case__, pixel_values=snake_case__, attention_mask=snake_case__, token_type_ids=snake_case__, start_positions=snake_case__, end_positions=snake_case__, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.prepare_config_and_inputs()
(
lowerCamelCase_
) =config_and_inputs
lowerCamelCase_ ={
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( A_ , A_ , unittest.TestCase ):
lowercase : str =False
lowercase : Any =False
lowercase : List[Any] =False
lowercase : int =(
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase : Dict =(
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
return True
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =LayoutLMvaModelTester(self )
lowerCamelCase_ =ConfigTester(self, config_class=snake_case__, hidden_size=37 )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=False ):
"""simple docstring"""
lowerCamelCase_ =copy.deepcopy(snake_case__ )
if model_class in get_values(snake_case__ ):
lowerCamelCase_ ={
k: v.unsqueeze(1 ).expand(-1, self.model_tester.num_choices, -1 ).contiguous()
if isinstance(snake_case__, torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(snake_case__ ):
lowerCamelCase_ =torch.ones(self.model_tester.batch_size, dtype=torch.long, device=snake_case__ )
elif model_class in get_values(snake_case__ ):
lowerCamelCase_ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=snake_case__ )
lowerCamelCase_ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=snake_case__ )
elif model_class in [
*get_values(snake_case__ ),
]:
lowerCamelCase_ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=snake_case__ )
elif model_class in [
*get_values(snake_case__ ),
]:
lowerCamelCase_ =torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=snake_case__, )
return inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase_ =type
self.model_tester.create_and_check_model(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =LayoutLMvaModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def a_ ( ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=snake_case__ ) if is_vision_available() else None
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(snake_case__ )
lowerCamelCase_ =self.default_image_processor
lowerCamelCase_ =prepare_img()
lowerCamelCase_ =image_processor(images=snake_case__, return_tensors='''pt''' ).pixel_values.to(snake_case__ )
lowerCamelCase_ =torch.tensor([[1, 2]] )
lowerCamelCase_ =torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
lowerCamelCase_ =model(
input_ids=input_ids.to(snake_case__ ), bbox=bbox.to(snake_case__ ), pixel_values=pixel_values.to(snake_case__ ), )
# verify the logits
lowerCamelCase_ =torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape, snake_case__ )
lowerCamelCase_ =torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], snake_case__, atol=1e-4 ) )
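# Illustrative check of the expected shape above: with LayoutLMv3-base defaults
# (image_size=224, patch_size=16), the 2 text tokens + (224 // 16)**2 = 196 patches
# + 1 CLS token give the sequence length 199 in (1, 199, 768).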
| 359 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
a_ : List[str] = logging.get_logger(__name__)
a_ : Optional[Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
a_ : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
lowercase : str =field(
default=lowerCamelCase__ , metadata={'help': 'Model type selected in the list: ' + ', '.join(lowerCamelCase__ )} )
lowercase : str =field(
default=lowerCamelCase__ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowercase : int =field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase : int =field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowercase : int =field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowercase : int =field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowercase : float =field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowercase : int =field(
        default=20 , metadata={'help': 'The total number of n-best predictions to generate when looking for an answer.'} )
lowercase : int =field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
    lowercase : int =field(default=1 , metadata={'help': 'The number of threads to use when converting examples to features.'} )
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[Any] ='train'
lowercase : Any ='dev'
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : SquadDataTrainingArguments
lowercase : List[SquadFeatures]
lowercase : Split
lowercase : bool
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = Split.train, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = "pt", ):
"""simple docstring"""
lowerCamelCase_ =args
lowerCamelCase_ =is_language_sensitive
lowerCamelCase_ =SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(lowerCAmelCase, lowerCAmelCase ):
try:
lowerCamelCase_ =Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
lowerCamelCase_ =mode
# Load data features from cache or dataset file
lowerCamelCase_ ='''v2''' if args.version_2_with_negative else '''v1'''
lowerCamelCase_ =os.path.join(
cache_dir if cache_dir is not None else args.data_dir, f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ =cached_features_file + '''.lock'''
with FileLock(lowerCAmelCase ):
if os.path.exists(lowerCAmelCase ) and not args.overwrite_cache:
lowerCamelCase_ =time.time()
lowerCamelCase_ =torch.load(lowerCAmelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ =self.old_features['''features''']
lowerCamelCase_ =self.old_features.get('''dataset''', lowerCAmelCase )
lowerCamelCase_ =self.old_features.get('''examples''', lowerCAmelCase )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
''' future run''' )
else:
if mode == Split.dev:
lowerCamelCase_ =self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ =self.processor.get_train_examples(args.data_dir )
lowerCamelCase_, lowerCamelCase_ =squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowerCAmelCase, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowerCAmelCase, )
lowerCamelCase_ =time.time()
torch.save(
{'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples}, lowerCAmelCase, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.features[i]
lowerCamelCase_ =torch.tensor(feature.input_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.attention_mask, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.token_type_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.cls_index, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.p_mask, dtype=torch.float )
lowerCamelCase_ =torch.tensor(feature.is_impossible, dtype=torch.float )
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''attention_mask''': attention_mask,
'''token_type_ids''': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'''is_impossible''': is_impossible} )
if self.is_language_sensitive:
inputs.update({'''langs''': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ =torch.tensor(feature.start_position, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
return inputs
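# Illustrative usage sketch (the class above is SquadDataset in the un-obfuscated
# transformers source; tokenizer and data args are assumed to be populated):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     dataset = SquadDataset(args, tokenizer, mode=Split.train)
#     features = dataset[0]   # dict with input_ids, attention_mask, ...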
| 6 | 0 |
'''simple docstring'''
from itertools import product
def a_ ( __snake_case : int , __snake_case : int ) -> list[int]:
"""simple docstring"""
lowerCamelCase_ =sides_number
lowerCamelCase_ =max_face_number * dice_number
lowerCamelCase_ =[0] * (max_total + 1)
lowerCamelCase_ =1
lowerCamelCase_ =range(lowerCAmelCase__ , max_face_number + 1 )
for dice_numbers in product(lowerCAmelCase__ , repeat=lowerCAmelCase__ ):
lowerCamelCase_ =sum(lowerCAmelCase__ )
totals_frequencies[total] += 1
return totals_frequencies
def a_ ( ) -> float:
"""simple docstring"""
lowerCamelCase_ =total_frequency_distribution(
sides_number=4 , dice_number=9 )
lowerCamelCase_ =total_frequency_distribution(
sides_number=6 , dice_number=6 )
lowerCamelCase_ =0
lowerCamelCase_ =9
lowerCamelCase_ =4 * 9
lowerCamelCase_ =6
for peter_total in range(lowerCAmelCase__ , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
lowerCamelCase_ =(4**9) * (6**6)
lowerCamelCase_ =peter_wins_count / total_games_number
lowerCamelCase_ =round(lowerCAmelCase__ , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
| 360 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : Any = logging.get_logger(__name__)
a_ : Optional[int] = """https://openaipublic.azureedge.net/jukebox/models/"""
a_ : Any = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def a_ ( __snake_case : int ) -> Any:
"""simple docstring"""
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
lowerCamelCase_ =key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
lowerCamelCase_ =key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
lowerCamelCase_ =key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
lowerCamelCase_ =key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
def a_ ( __snake_case : Dict , __snake_case : int , __snake_case : Dict , __snake_case : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ ={}
import re
lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_conv_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] )
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_encoder_block_conv_in.sub(__snake_case , __snake_case )
elif re_encoder_block_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] )
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_encoder_block_resnet.sub(__snake_case , __snake_case )
elif re_encoder_block_proj_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_proj_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
lowerCamelCase_ =re_encoder_block_proj_out.sub(__snake_case , __snake_case )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_conv_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_decoder_block_conv_out.sub(__snake_case , __snake_case )
elif re_decoder_block_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_decoder_block_resnet.sub(__snake_case , __snake_case )
elif re_decoder_block_proj_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_proj_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
lowerCamelCase_ =re_decoder_block_proj_in.sub(__snake_case , __snake_case )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_conv_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_prior_cond_conv_out.sub(__snake_case , __snake_case )
elif re_prior_cond_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_prior_cond_resnet.sub(__snake_case , __snake_case )
elif re_prior_cond_proj_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_proj_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
lowerCamelCase_ =re_prior_cond_proj_in.sub(__snake_case , __snake_case )
# keep original key
else:
lowerCamelCase_ =original_key
lowerCamelCase_ =replace_key(__snake_case )
if F'''{key_prefix}.{key}''' not in model_state_dict or key is None:
print(F'''failed converting {original_key} to {key}, does not match''' )
        # handle mismatched shapes
elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape:
lowerCamelCase_ =model_state_dict[F'''{key_prefix}.{key}''']
            print(F'''{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match''' )
lowerCamelCase_ =original_key
lowerCamelCase_ =original_key
lowerCamelCase_ =value
return new_dict
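# Illustrative rename (sketch): for the key "encoders.0.level_blocks.1.model.2.3.weight"
# the matched groups are ("0", "1", "2", "3", "weight"), so block_index = 2 * 2 + 3 = 7
# and the key becomes "encoders.0.level_blocks.1.downsample_block.7.weight".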
@torch.no_grad()
def a_ ( __snake_case : List[str]=None , __snake_case : Tuple=None ) -> Union[str, Any]:
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ):
lowerCamelCase_ =requests.get(F'''{PREFIX}{file}''' , allow_redirects=__snake_case )
os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=__snake_case )
open(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , '''wb''' ).write(r.content )
lowerCamelCase_ =MODEL_MAPPING[model_name.split('''/''' )[-1]]
lowerCamelCase_ =JukeboxConfig.from_pretrained(__snake_case )
lowerCamelCase_ =JukeboxModel(__snake_case )
lowerCamelCase_ =[]
lowerCamelCase_ ={}
for i, dict_name in enumerate(__snake_case ):
lowerCamelCase_ =torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )['''model''']
lowerCamelCase_ ={}
for k in old_dic.keys():
if k.endswith('''.b''' ):
lowerCamelCase_ =old_dic[k]
elif k.endswith('''.w''' ):
lowerCamelCase_ =old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
lowerCamelCase_ =old_dic[k]
else:
lowerCamelCase_ =old_dic[k]
lowerCamelCase_ ='''vqvae''' if i == 0 else F'''priors.{3 - i}'''
lowerCamelCase_ =fix_jukebox_keys(__snake_case , model.state_dict() , __snake_case , __snake_case )
weight_dict.append(__snake_case )
lowerCamelCase_ =weight_dict.pop(0 )
model.vqvae.load_state_dict(__snake_case )
for i in range(len(__snake_case ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
with open(F'''{pytorch_dump_folder_path}/mapping.json''' , '''w''' ) as txtfile:
json.dump(__snake_case , __snake_case )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__snake_case )
return weight_dict
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
a_ : Optional[int] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 6 | 0 |
'''simple docstring'''
import baseaa
def a_ ( __snake_case : Any ) -> Tuple:
"""simple docstring"""
return baseaa.baaencode(string.encode('''utf-8''' ) )
def a_ ( __snake_case : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return baseaa.baadecode(lowercase__ ).decode('''utf-8''' )
if __name__ == "__main__":
a_ : List[str] = """Hello World!"""
a_ : Any = baseaa_encode(test)
print(encoded)
a_ : str = baseaa_decode(encoded)
print(decoded)
| 361 |
'''simple docstring'''
def a_ ( __snake_case : int = 1000 ) -> int:
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =1, 1
lowerCamelCase_ =2
while True:
lowerCamelCase_ =0
lowerCamelCase_ =fa + fa
lowerCamelCase_, lowerCamelCase_ =fa, f
index += 1
for _ in str(__snake_case ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
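# Worked example (illustrative): the first Fibonacci number with 3 digits is
# F(12) = 144, so solution(3) == 12 under the intended logic; Project Euler 25
# asks for the first term with 1000 digits.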
| 6 | 0 |
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
a_ : Dict = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
a_ : int = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def init_clap(checkpoint_path: str, enable_fusion: bool = False):
    """simple docstring"""
    model, model_cfg =create_model(
        '''HTSAT-tiny''' , '''roberta''' , checkpoint_path , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=enable_fusion , fusion_type='''aff_2d''' if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict(state_dict):
    """simple docstring"""
    model_state_dict ={}
    sequential_layers_pattern =R'''.*sequential.(\d+).*'''
    text_projection_pattern =R'''.*_projection.(\d+).*'''
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key =key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer =re.match(sequential_layers_pattern , key ).group(1 )
            key =key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(sequential_layer )//3}.linear.''' )
        elif re.match(text_projection_pattern , key ):
            projecton_layer =int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer =1 if projecton_layer == 0 else 2
            key =key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv =value
            qkv_dim =mixed_qkv.size(0 ) // 3
            query_layer =mixed_qkv[:qkv_dim]
            key_layer =mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer =mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace('''qkv''' , '''query''' )] =query_layer
            model_state_dict[key.replace('''qkv''' , '''key''' )] =key_layer
            model_state_dict[key.replace('''qkv''' , '''value''' )] =value_layer
        else:
            model_state_dict[key] =value
    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """simple docstring"""
    clap_model, clap_model_cfg =init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict =clap_model.state_dict()
    state_dict =rename_state_dict(state_dict )
    transformers_config =ClapConfig()
    transformers_config.audio_config.enable_fusion =enable_fusion
    model =ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
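# Standalone sketch of the `sequential.N.` -> `layers.M.linear.` renaming used
# in rename_state_dict above (regex and string handling only; the key below is
# a made-up example, not a real CLAP checkpoint entry):
demo_key = "text_branch.sequential.9.weight"
layer = re.match(R".*sequential.(\d+).*", demo_key).group(1)
print(demo_key.replace(f"sequential.{layer}.", f"layers.{int(layer) // 3}.linear."))
# -> text_branch.layers.3.linear.weight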
| 362 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file):
    """simple docstring"""
    components =test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
            F'''{test_file} instead.''' )
    test_fn =components[-1]
    if not test_fn.endswith('''py''' ):
        raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' )
    if not test_fn.startswith('''test_modeling_''' ):
        raise ValueError(
            F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
    components =components[:-1] + [test_fn.replace('''.py''' , '''''' )]
    test_module_path ='''.'''.join(components )
    return test_module_path


def get_test_module(test_file):
    """simple docstring"""
    test_module_path =get_module_path(test_file )
    test_module =importlib.import_module(test_module_path )
    return test_module
def get_tester_classes(test_file):
    """simple docstring"""
    tester_classes =[]
    test_module =get_test_module(test_file )
    for attr in dir(test_module ):
        if attr.endswith('''ModelTester''' ):
            tester_classes.append(getattr(test_module , attr ) )
    # sort with class names
    return sorted(tester_classes , key=lambda x: x.__name__ )


def get_test_classes(test_file):
    """simple docstring"""
    test_classes =[]
    test_module =get_test_module(test_file )
    for attr in dir(test_module ):
        attr_value =getattr(test_module , attr )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes =getattr(attr_value , '''all_model_classes''' , [] )
        if len(model_classes ) > 0:
            test_classes.append(attr_value )
    # sort with class names
    return sorted(test_classes , key=lambda x: x.__name__ )


def get_model_classes(test_file):
    """simple docstring"""
    test_classes =get_test_classes(test_file )
    model_classes =set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(model_classes , key=lambda x: x.__name__ )
def get_model_tester_from_test_class(test_class):
    """simple docstring"""
    test =test_class()
    if hasattr(test , '''setUp''' ):
        test.setUp()
    model_tester =None
    if hasattr(test , '''model_tester''' ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester =test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file , model_class):
    """simple docstring"""
    test_classes =get_test_classes(test_file )
    target_test_classes =[]
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class )
    # sort with class names
    return sorted(target_test_classes , key=lambda x: x.__name__ )


def get_tester_classes_for_model(test_file , model_class):
    """simple docstring"""
    test_classes =get_test_classes_for_model(test_file , model_class )
    tester_classes =[]
    for test_class in test_classes:
        tester_class =get_model_tester_from_test_class(test_class )
        if tester_class is not None:
            tester_classes.append(tester_class )
    # sort with class names
    return sorted(tester_classes , key=lambda x: x.__name__ )
def get_test_to_tester_mapping(test_file):
    """simple docstring"""
    test_classes =get_test_classes(test_file )
    test_tester_mapping ={test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """simple docstring"""
    model_classes =get_model_classes(test_file )
    model_test_mapping ={
        model_class: get_test_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """simple docstring"""
    model_classes =get_model_classes(test_file )
    model_to_tester_mapping ={
        model_class: get_tester_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """simple docstring"""
    if isinstance(o , str ):
        return o
    elif isinstance(o , type ):
        return o.__name__
    elif isinstance(o , (list, tuple) ):
        return [to_json(x ) for x in o]
    elif isinstance(o , dict ):
        return {to_json(k ): to_json(v ) for k, v in o.items()}
    else:
        return o
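# Quick illustration of the path-to-module translation above (pure string
# handling, so it runs without the transformers test suite installed):
print(get_module_path(os.path.join("tests", "models", "bert", "test_modeling_bert.py")))
# tests.models.bert.test_modeling_bert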
| 6 | 0 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """openbmb/cpm-ant-10b""": 10_24,
}
def load_vocab(vocab_file ):
    """simple docstring"""
    vocab =collections.OrderedDict()
    with open(vocab_file , '''r''' , encoding='''utf-8''' ) as reader:
        tokens =reader.readlines()
    for index, token in enumerate(tokens ):
        token =token.rstrip('''\n''' )
        vocab[token] =index
    return vocab
class WordpieceTokenizer(object ):
    def __init__( self, vocab, unk_token="<unk>", max_input_chars_per_word=200 ):
        """simple docstring"""
        self.vocab =vocab
        self.unk_token =unk_token
        self.max_input_chars_per_word =max_input_chars_per_word
    def tokenize( self, token ):
        """simple docstring"""
        chars =list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start =0
        sub_tokens =[]
        while start < len(chars ):
            end =len(chars )
            cur_substr =None
            while start < end:
                substr =''''''.join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr =substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start =end
        return sub_tokens
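# Tiny demo of the greedy longest-match loop above, using a toy vocabulary
# (hypothetical tokens; no real CPM-Ant vocab file is required):
_toy = WordpieceTokenizer(vocab={"un", "believ", "able"}, unk_token="<unk>")
print(_toy.tokenize("unbelievable"))  # ['un', 'believ', 'able']
print(_toy.tokenize("xyz"))  # ['<unk>', '<unk>', '<unk>']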
class CpmAntTokenizer(PreTrainedTokenizer ):
    vocab_files_names =VOCAB_FILES_NAMES
    pretrained_vocab_files_map =PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names =['''input_ids''', '''attention_mask''']
    add_prefix_space =False
def __init__( self, lowerCAmelCase, lowerCAmelCase="<d>", lowerCAmelCase="</d>", lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="<pad>", lowerCAmelCase="<unk>", lowerCAmelCase="</n>", lowerCAmelCase="</_>", lowerCAmelCase="left", **lowerCAmelCase, ):
"""simple docstring"""
requires_backends(self, ['''jieba'''] )
super().__init__(
bod_token=lowercase_, eod_token=lowercase_, bos_token=lowercase_, eos_token=lowercase_, pad_token=lowercase_, unk_token=lowercase_, line_token=lowercase_, space_token=lowercase_, padding_side=lowercase_, **lowercase_, )
lowerCamelCase_ =bod_token
lowerCamelCase_ =eod_token
lowerCamelCase_ =load_vocab(lowercase_ )
lowerCamelCase_ =self.encoder[space_token]
lowerCamelCase_ =self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
lowerCamelCase_ =collections.OrderedDict(sorted(self.encoder.items(), key=lambda lowerCAmelCase : x[1] ) )
lowerCamelCase_ ={v: k for k, v in self.encoder.items()}
lowerCamelCase_ =WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token )
    @property
    def bod_token_id( self ):
        """simple docstring"""
        return self.encoder[self.bod_token]

    @property
    def eod_token_id( self ):
        """simple docstring"""
        return self.encoder[self.eod_token]

    @property
    def newline_id( self ):
        """simple docstring"""
        return self.encoder["\n"]

    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.encoder )

    def get_vocab( self ):
        """simple docstring"""
        return dict(self.encoder, **self.added_tokens_encoder )
    def _tokenize( self, text ):
        """simple docstring"""
        output_tokens =[]
        for x in jieba.cut(text, cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens

    def _decode( self, token_ids, **kwargs ):
        """simple docstring"""
        token_ids =[i for i in token_ids if i >= 0]
        token_ids =[
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs )

    def check( self, token ):
        """simple docstring"""
        return token in self.encoder

    def convert_tokens_to_string( self, tokens ):
        """simple docstring"""
        return "".join(tokens )

    def _convert_token_to_id( self, token ):
        """simple docstring"""
        return self.encoder.get(token, self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self, index ):
        """simple docstring"""
        return self.decoder.get(index, self.unk_token )
    def save_vocabulary( self, save_directory, filename_prefix = None ):
        """simple docstring"""
        if os.path.isdir(save_directory ):
            vocab_file =os.path.join(
                save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        else:
            vocab_file =(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        index =0
        if " " in self.encoder:
            self.encoder['''</_>'''] =self.encoder[''' ''']
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder['''</n>'''] =self.encoder['''\n''']
            del self.encoder["\n"]
        self.encoder =collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1] ) )
        with open(vocab_file, '''w''', encoding='''utf-8''' ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        ''' Please check that the vocabulary is not corrupted!''' )
                    index =token_index
                writer.write(token + '''\n''' )
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1 = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask( self, token_ids_0, token_ids_1 = None, already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
        return [1] + ([0] * len(token_ids_0 ))
| 363 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=DummyObject ):
lowercase : str =['speech']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''speech'''] )
class __UpperCamelCase ( metaclass=DummyObject ):
lowercase : Any =['speech']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''speech'''] )
| 6 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 10_24,
"""moussaKam/barthez""": 10_24,
"""moussaKam/barthez-orangesum-title""": 10_24,
}
a_ : List[Any] = """▁"""
class BarthezTokenizer(PreTrainedTokenizer ):
    vocab_files_names =VOCAB_FILES_NAMES
    pretrained_vocab_files_map =PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names =["input_ids", "attention_mask"]

    def __init__( self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs = None, **kwargs, ):
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token =AddedToken(mask_token, lstrip=True, rstrip=False ) if isinstance(mask_token, str ) else mask_token
        self.sp_model_kwargs ={} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file =vocab_file
        self.sp_model =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.fairseq_tokens_to_ids ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        self.fairseq_tokens_to_ids['''<mask>'''] =len(self.sp_model ) - 1
        self.fairseq_ids_to_tokens ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1 = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls =[self.cls_token_id]
        sep =[self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask( self, token_ids_0, token_ids_1 = None, already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self, token_ids_0, token_ids_1 = None ):
        """simple docstring"""
        sep =[self.sep_token_id]
        cls =[self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.sp_model )

    def get_vocab( self ):
        """simple docstring"""
        vocab ={self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def _tokenize( self, text ):
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str )

    def _convert_token_to_id( self, token ):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id =self.sp_model.PieceToId(token )
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token( self, index ):
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index )

    def convert_tokens_to_string( self, tokens ):
        """simple docstring"""
        current_sub_tokens =[]
        out_string =''''''
        prev_is_special =False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special =True
                current_sub_tokens =[]
            else:
                current_sub_tokens.append(token )
                prev_is_special =False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def __getstate__( self ):
        """simple docstring"""
        state =self.__dict__.copy()
        state['''sp_model'''] =None
        return state

    def __setstate__( self, d ):
        """simple docstring"""
        self.__dict__ =d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs''' ):
            self.sp_model_kwargs ={}
        self.sp_model =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def save_vocabulary( self, save_directory, filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file =os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file, '''wb''' ) as fi:
                content_spiece_model =self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
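# Layout sketch for the sequence-pair case handled above (list arithmetic
# only): BARThez formats a pair as `<s> A </s></s> B </s>`.
cls, sep = [0], [2]
a_ids, b_ids = [11, 12], [21]
print(cls + a_ids + sep + sep + b_ids + sep)  # [0, 11, 12, 2, 2, 21, 2]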
| 364 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin ):
    attributes =['image_processor', 'tokenizer']
    image_processor_class ='AutoImageProcessor'
    tokenizer_class ='AutoTokenizer'
    def __init__( self, image_processor=None, tokenizer=None, **kwargs ):
        """simple docstring"""
        feature_extractor =None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', FutureWarning, )
            feature_extractor =kwargs.pop('''feature_extractor''' )
        image_processor =image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor, tokenizer )
        self.current_processor =self.image_processor
        self._in_target_context_manager =False
    def __call__( self, *args, **kwargs ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs )
        images =kwargs.pop('''images''', None )
        text =kwargs.pop('''text''', None )
        if len(args ) > 0:
            images =args[0]
            args =args[1:]
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
        if images is not None:
            inputs =self.image_processor(images, *args, **kwargs )
        if text is not None:
            encodings =self.tokenizer(text, **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['''labels'''] =encodings['''input_ids''']
            return inputs
    def batch_decode( self, *args, **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs )

    def decode( self, *args, **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs )
    @contextmanager
    def as_target_processor( self ):
        """simple docstring"""
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your images inputs, or in a separate call.''' )
        self._in_target_context_manager =True
        self.current_processor =self.tokenizer
        yield
        self.current_processor =self.image_processor
        self._in_target_context_manager =False
    def token2json( self, tokens, is_inner_value=False, added_vocab=None ):
        """simple docstring"""
        if added_vocab is None:
            added_vocab =self.tokenizer.get_added_vocab()
        output ={}
        while tokens:
            start_token =re.search(R'''<s_(.*?)>''', tokens, re.IGNORECASE )
            if start_token is None:
                break
            key =start_token.group(1 )
            end_token =re.search(Rf'''</s_{key}>''', tokens, re.IGNORECASE )
            start_token =start_token.group()
            if end_token is None:
                tokens =tokens.replace(start_token, '''''' )
            else:
                end_token =end_token.group()
                start_token_escaped =re.escape(start_token )
                end_token_escaped =re.escape(end_token )
                content =re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''', tokens, re.IGNORECASE )
                if content is not None:
                    content =content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value =self.token2json(content, is_inner_value=True, added_vocab=added_vocab )
                        if value:
                            if len(value ) == 1:
                                value =value[0]
                            output[key] =value
                    else:  # leaf nodes
                        output[key] =[]
                        for leaf in content.split(R'''<sep/>''' ):
                            leaf =leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf =leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf )
                        if len(output[key] ) == 1:
                            output[key] =output[key][0]
                tokens =tokens[tokens.find(end_token ) + len(end_token ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab )
        if len(output ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', FutureWarning, )
        return self.image_processor
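# Standalone sketch of the tag parsing behind `token2json` (regex only; the
# field names are hypothetical, not from a real Donut vocabulary):
demo = "<s_menu><s_name>latte</s_name></s_menu>"
start = re.search(R"<s_(.*?)>", demo)
key = start.group(1)
body = re.search(re.escape(start.group()) + "(.*?)" + re.escape(f"</s_{key}>"), demo)
print(key, "->", body.group(1))  # menu -> <s_name>latte</s_name>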
| 6 | 0 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path ):
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths(metric_fn , prediction , ground_truths ):
    """simple docstring"""
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
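# Worked example of the max-over-references scoring above (no model needed):
# the prediction is matched against every gold answer and the best score wins.
print(metric_max_over_ground_truths(lambda p, g: float(p == g), "paris", ["Paris", "paris"]))  # 1.0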
def get_scores(args , preds_path , gold_data_path ):
    """simple docstring"""
    hypos =[line.strip() for line in open(preds_path , '''r''' ).readlines()]
    answers =[]
    if args.gold_data_mode == "qa":
        data =pd.read_csv(gold_data_path , sep='''\t''' , header=None )
        for answer_list in data[1]:
            ground_truths =ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references =[line.strip() for line in open(gold_data_path , '''r''' ).readlines()]
        answers =[[reference] for reference in references]
    fa = em = total =0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        fa += metric_max_over_ground_truths(fa_score , prediction , ground_truths )
    em =100.0 * em / total
    fa =100.0 * fa / total
    logger.info(F'''F1: {fa:.2f}''' )
    logger.info(F'''EM: {em:.2f}''' )
def get_precision_at_k(args , preds_path , gold_data_path ):
    """simple docstring"""
    k =args.k
    hypos =[line.strip() for line in open(preds_path , '''r''' ).readlines()]
    references =[line.strip() for line in open(gold_data_path , '''r''' ).readlines()]
    em = total =0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance =set(hypo.split('''\t''' )[:k] )
        ref_provenance =set(reference.split('''\t''' ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k
    em =100.0 * em / total
    logger.info(F'''Precision@{k}: {em: .2f}''' )
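# Worked example of the precision@k overlap computed above: with k=2,
# predicting {"A", "B"} against references {"B", "C"} scores |{"B"}| / 2 = 0.5.
print(len({"A", "B"} & {"B", "C"}) / 2)  # 0.5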
def evaluate_batch_retrieval(args , rag_model , questions ):
    """simple docstring"""

    def strip_title(title ):
        if title.startswith('''"''' ):
            title =title[1:]
        if title.endswith('''"''' ):
            title =title[:-1]
        return title

    retriever_input_ids =rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors='''pt''' , padding=True , truncation=True , )['''input_ids'''].to(args.device )
    question_enc_outputs =rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output =question_enc_outputs[0]
    result =rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
    all_docs =rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings =[]
    for docs in all_docs:
        provenance =[strip_title(title ) for title in docs['''title''']]
        provenance_strings.append('''\t'''.join(provenance ) )
    return provenance_strings
def evaluate_batch_e2e(args , rag_model , questions ):
    """simple docstring"""
    with torch.no_grad():
        inputs_dict =rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors='''pt''' , padding=True , truncation=True )
        input_ids =inputs_dict.input_ids.to(args.device )
        attention_mask =inputs_dict.attention_mask.to(args.device )
        outputs =rag_model.generate(  # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers =rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True )
        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info('''Q: {} - A: {}'''.format(q , a ) )
        return answers
def get_args():
    """simple docstring"""
    parser =argparse.ArgumentParser()
    parser.add_argument(
        '''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=str , help=(
            '''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
            ''' model_name_or_path'''
        ) , )
    parser.add_argument(
        '''--index_name''' , default=None , choices=['''exact''', '''compressed''', '''legacy'''] , type=str , help='''RAG model retriever type''' , )
    parser.add_argument(
        '''--index_path''' , default=None , type=str , help='''Path to the retrieval index''' , )
    parser.add_argument('''--n_docs''' , default=5 , type=int , help='''Number of retrieved docs''' )
    parser.add_argument(
        '''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=str , help=(
            '''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
            ''' precision@k.'''
        ) , )
    parser.add_argument('''--k''' , default=1 , type=int , help='''k for the precision@k calculation''' )
    parser.add_argument(
        '''--evaluation_set''' , default=None , type=str , required=True , help='''Path to a file containing evaluation samples''' , )
    parser.add_argument(
        '''--gold_data_path''' , default=None , type=str , required=True , help='''Path to a tab-separated file with gold samples''' , )
    parser.add_argument(
        '''--gold_data_mode''' , default='''qa''' , type=str , choices=['''qa''', '''ans'''] , help=(
            '''Format of the gold data file'''
            '''qa - a single line in the following format: question [tab] answer_list'''
            '''ans - a single line of the gold file contains the expected answer string'''
        ) , )
    parser.add_argument(
        '''--predictions_path''' , type=str , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
    parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
    parser.add_argument(
        '''--eval_batch_size''' , default=8 , type=int , help='''Batch size per GPU/CPU for evaluation.''' , )
    parser.add_argument(
        '''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
    parser.add_argument(
        '''--num_beams''' , default=4 , type=int , help='''Number of beams to be used when generating answers''' , )
    parser.add_argument('''--min_length''' , default=1 , type=int , help='''Min length of the generated answers''' )
    parser.add_argument('''--max_length''' , default=50 , type=int , help='''Max length of the generated answers''' )
    parser.add_argument(
        '''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
    parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , )
    args =parser.parse_args()
    args.device =torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    return args
def main(args ):
    """simple docstring"""
    model_kwargs ={}
    if args.model_type is None:
        args.model_type =infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith('''rag''' ):
        model_class =RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
        model_kwargs['''n_docs'''] =args.n_docs
        if args.index_name is not None:
            model_kwargs['''index_name'''] =args.index_name
        if args.index_path is not None:
            model_kwargs['''index_path'''] =args.index_path
    else:
        model_class =BartForConditionalGeneration
    checkpoints =(
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info('''Evaluate the following checkpoints: %s''' , checkpoints )
    score_fn =get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
    evaluate_batch_fn =evaluate_batch_e2e if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue
        logger.info('''***** Running evaluation for {} *****'''.format(checkpoint ) )
        logger.info(''' Batch size = %d''' , args.eval_batch_size )
        logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
        if args.model_type.startswith('''rag''' ):
            retriever =RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model =model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model =model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )
        with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
            questions =[]
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers =evaluate_batch_fn(args , model , questions )
                    preds_file.write('''\n'''.join(answers ) + '''\n''' )
                    preds_file.flush()
                    questions =[]
            if len(questions ) > 0:
                answers =evaluate_batch_fn(args , model , questions )
                preds_file.write('''\n'''.join(answers ) )
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    args = get_args()
main(args)
| 365 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __UpperCamelCase ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class =ShapEImg2ImgPipeline
    params =['''image''']
    batch_params =['''image''']
    required_optional_params =[
        '''num_images_per_prompt''',
        '''num_inference_steps''',
        '''generator''',
        '''latents''',
        '''guidance_scale''',
        '''frame_size''',
        '''output_type''',
        '''return_dict''',
    ]
    test_gpu_offload =False
    @property
    def text_embedder_hidden_size( self ):
        """simple docstring"""
        return 32

    @property
    def time_input_dim( self ):
        """simple docstring"""
        return 32

    @property
    def time_embed_dim( self ):
        """simple docstring"""
        return self.time_input_dim * 4

    @property
    def renderer_dim( self ):
        """simple docstring"""
        return 8
    @property
    def dummy_image_encoder( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        config =CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
        model =CLIPVisionModel(config )
        return model
    @property
    def dummy_image_processor( self ):
        """simple docstring"""
        image_processor =CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor
    @property
    def dummy_prior( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model_kwargs ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
        model =PriorTransformer(**model_kwargs )
        return model
    @property
    def dummy_renderer( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model_kwargs ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
        model =ShapERenderer(**model_kwargs )
        return model
    def get_dummy_components( self ):
        """simple docstring"""
        prior =self.dummy_prior
        image_encoder =self.dummy_image_encoder
        image_processor =self.dummy_image_processor
        renderer =self.dummy_renderer
        scheduler =HeunDiscreteScheduler(
            beta_schedule='''exp''', num_train_timesteps=1_024, prediction_type='''sample''', use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
    def get_dummy_inputs( self, device, seed=0 ):
        """simple docstring"""
        input_image =floats_tensor((1, 3, 64, 64), rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('''mps''' ):
            generator =torch.manual_seed(seed )
        else:
            generator =torch.Generator(device=device ).manual_seed(seed )
        inputs ={
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
    def test_shap_e_img2img( self ):
        """simple docstring"""
        device ='''cpu'''
        components =self.get_dummy_components()
        pipe =self.pipeline_class(**components )
        pipe =pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output =pipe(**self.get_dummy_inputs(device ) )
        image =output.images[0]
        image_slice =image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice =np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent( self ):
        """simple docstring"""
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self ):
        """simple docstring"""
        test_max_difference =torch_device == '''cpu'''
        relax_max_difference =True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )
    def test_num_images_per_prompt( self ):
        """simple docstring"""
        components =self.get_dummy_components()
        pipe =self.pipeline_class(**components )
        pipe =pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size =1
        num_images_per_prompt =2
        inputs =self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] =batch_size * [inputs[key]]
        images =pipe(**inputs, num_images_per_prompt=num_images_per_prompt )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e_img2img( self ):
        """simple docstring"""
        input_image =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
        expected_image =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/shap_e/test_shap_e_img2img_out.npy''' )
        pipe =ShapEImg2ImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
        pipe =pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator =torch.Generator(device=torch_device ).manual_seed(0 )
        images =pipe(
            input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type='''np''', ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image )
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCAmelCase, lowerCAmelCase )
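# Shape sketch for the assertions above: the pipeline renders 20 frames of RGB
# images, and the fast test compares only a 3x3 corner of frame 0.
_frames = np.zeros((20, 32, 32, 3))
print(_frames[0, -3:, -3:, -1].shape)  # (3, 3)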
| 6 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict , encoder_only=False ):
    """simple docstring"""
    new_state_dict =OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith('''head''' ):
            key ='''segformer.encoder.''' + key
        if key.startswith('''backbone''' ):
            key =key.replace('''backbone''' , '''segformer.encoder''' )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx =key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
            key =key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(idx )-1}''' )
        if "norm" in key:
            key =key.replace('''norm''' , '''layer_norm''' )
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx =key[key.find('''segformer.encoder.layer_norm''' ) + len('''segformer.encoder.layer_norm''' )]
            key =key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(idx )-1}''' )
        if "layer_norm1" in key:
            key =key.replace('''layer_norm1''' , '''layer_norm_1''' )
        if "layer_norm2" in key:
            key =key.replace('''layer_norm2''' , '''layer_norm_2''' )
        if "block" in key:
            # replace for example block1 by block.0
            idx =key[key.find('''block''' ) + len('''block''' )]
            key =key.replace(F'''block{idx}''' , F'''block.{int(idx )-1}''' )
        if "attn.q" in key:
            key =key.replace('''attn.q''' , '''attention.self.query''' )
        if "attn.proj" in key:
            key =key.replace('''attn.proj''' , '''attention.output.dense''' )
        if "attn" in key:
            key =key.replace('''attn''' , '''attention.self''' )
        if "fc1" in key:
            key =key.replace('''fc1''' , '''dense1''' )
        if "fc2" in key:
            key =key.replace('''fc2''' , '''dense2''' )
        if "linear_pred" in key:
            key =key.replace('''linear_pred''' , '''classifier''' )
        if "linear_fuse" in key:
            key =key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
            key =key.replace('''linear_fuse.bn''' , '''batch_norm''' )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx =key[key.find('''linear_c''' ) + len('''linear_c''' )]
            key =key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(idx )-1}''' )
        if key.startswith('''head''' ):
            key =key.replace('''head''' , '''classifier''' )
        new_state_dict[key] =value
    return new_state_dict
def read_in_k_v(state_dict , config ):
    """simple docstring"""
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight =state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
            kv_bias =state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
            # next, add keys and values (in that order) to the state dict
            state_dict[F'''segformer.encoder.block.{i}.{j}.attention.self.key.weight'''] =kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F'''segformer.encoder.block.{i}.{j}.attention.self.key.bias'''] =kv_bias[: config.hidden_sizes[i]]
            state_dict[F'''segformer.encoder.block.{i}.{j}.attention.self.value.weight'''] =kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F'''segformer.encoder.block.{i}.{j}.attention.self.value.bias'''] =kv_bias[
                config.hidden_sizes[i] :
            ]
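# Minimal tensor sketch of the key/value split above (hidden size 4 assumed):
# the fused (2*hidden, hidden) matrix is cut into equal key and value halves.
_kv = torch.arange(32.0).reshape(8, 4)
print(_kv[:4].shape, _kv[4:].shape)  # torch.Size([4, 4]) torch.Size([4, 4])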
def prepare_img():
    """simple docstring"""
    url ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image =Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path ):
    """simple docstring"""
    config =SegformerConfig()
    encoder_only =False
    # set attributes based on model_name
    repo_id ='''huggingface/label-files'''
    if "segformer" in model_name:
        size =model_name[len('''segformer.''' ) : len('''segformer.''' ) + 2]
        if "ade" in model_name:
            config.num_labels =150
            filename ='''ade20k-id2label.json'''
            expected_shape =(1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels =19
            filename ='''cityscapes-id2label.json'''
            expected_shape =(1, 19, 128, 128)
        else:
            raise ValueError(F'''Model {model_name} not supported''' )
    elif "mit" in model_name:
        encoder_only =True
        size =model_name[4:6]
        config.num_labels =1000
        filename ='''imagenet-1k-id2label.json'''
        expected_shape =(1, 1000)
    else:
        raise ValueError(F'''Model {model_name} not supported''' )
    # set config attributes
    id2label =json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label ={int(k ): v for k, v in id2label.items()}
    config.id2label =id2label
    config.label2id ={v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes =[64, 128, 320, 512]
        config.decoder_hidden_size =256
    elif size == "b2":
        config.hidden_sizes =[64, 128, 320, 512]
        config.decoder_hidden_size =768
        config.depths =[3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes =[64, 128, 320, 512]
        config.decoder_hidden_size =768
        config.depths =[3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes =[64, 128, 320, 512]
        config.decoder_hidden_size =768
        config.depths =[3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes =[64, 128, 320, 512]
        config.decoder_hidden_size =768
        config.depths =[3, 6, 40, 3]
    else:
        raise ValueError(F'''Size {size} not supported''' )
    # load image processor (only resize + normalize)
    image_processor =SegformerImageProcessor(
        image_scale=(512, 512) , keep_ratio=False , align=False , do_random_crop=False )
    # prepare image
    image =prepare_img()
    pixel_values =image_processor(images=image , return_tensors='''pt''' ).pixel_values
    logger.info(F'''Converting model {model_name}...''' )
    # load original state dict
    if encoder_only:
        state_dict =torch.load(checkpoint_path , map_location=torch.device('''cpu''' ) )
    else:
        state_dict =torch.load(checkpoint_path , map_location=torch.device('''cpu''' ) )['''state_dict''']
    # rename keys
    state_dict =rename_keys(state_dict , encoder_only=encoder_only )
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage =False
        model =SegformerForImageClassification(config )
    else:
        model =SegformerForSemanticSegmentation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs =model(pixel_values )
    logits =outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice =torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice =torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice =torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice =torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice =torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice =torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice =torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice =torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice =torch.tensor(
[
[
[-1.13_72e01, -1.27_87e01, -1.34_77e01],
[-1.25_36e01, -1.41_94e01, -1.44_09e01],
[-1.32_17e01, -1.48_88e01, -1.53_27e01],
],
[
[-1.47_91e01, -1.71_22e01, -1.82_77e01],
[-1.71_63e01, -1.91_92e01, -1.95_33e01],
[-1.78_97e01, -1.99_91e01, -2.03_15e01],
],
[
[7.67_23e-01, 4.19_21e-01, -7.78_78e-02],
[4.77_72e-01, 9.55_57e-03, -2.80_82e-01],
[3.60_32e-01, -2.48_26e-01, -5.11_68e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice =torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice =torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice =torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice =torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice =torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
lowerCamelCase_ =torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print('''Predicted class:''', model.config.id2label[predicted_class_idx])
    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--model_name""",
        default="""segformer.b0.512x512.ade.160k""",
        type=str,
        help="""Name of the model you'd like to convert.""",
    )
    parser.add_argument(
        """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    args = parser.parse_args()
    convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 366 |
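# --- Editor's illustrative sketch (not part of the dataset rows) ---
# The conversion script above ends with an expected-slice check: a small
# corner of the converted model's output is compared against hard-coded
# reference values. A minimal, self-contained version of that pattern, on
# dummy tensors rather than real SegFormer logits:
import torch

logits = torch.full((1, 19, 128, 128), 0.5)      # stand-in for model output
expected_slice = torch.full((3, 3, 3), 0.5)      # stand-in for reference values
assert logits.shape == (1, 19, 128, 128)
assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)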
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """simple docstring"""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    """simple docstring"""
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
| 6 | 0 |
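# --- Editor's illustrative sketch (not part of the dataset rows) ---
# Sanity check for the dice-frequency routine above: enumerating all rolls
# with itertools.product must produce sides_number ** dice_number outcomes,
# so each frequency table sums to exactly that count.
from itertools import product

def freqs(sides: int, dice: int) -> list[int]:
    table = [0] * (sides * dice + 1)
    for roll in product(range(1, sides + 1), repeat=dice):
        table[sum(roll)] += 1
    return table

assert sum(freqs(4, 9)) == 4**9   # Peter's 9 four-sided dice
assert sum(freqs(6, 6)) == 6**6   # Colin's 6 six-sided dice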
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """simple docstring"""
    in_colab = False
    in_kaggle = False
    if any(key.startswith('''KAGGLE''') for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython())
    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            F'''Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.''')
    if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''', None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp
        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                '''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
                '''your training function. Restart your notebook and make sure no cells initializes an '''
                '''`Accelerator`.''')
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type='''TPU''')
        print(F'''Launching a training on {num_processes} TPU cores.''')
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method='''fork''')
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print('''Launching training on one GPU.''')
        else:
            print('''Launching training on one CPU.''')
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                '''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''')
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException
            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    '''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
                    '''inside your training function. Restart your notebook and make sure no cells initializes an '''
                    '''`Accelerator`.''')
            if torch.cuda.is_initialized():
                raise ValueError(
                    '''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
                    '''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
                    '''function.''')
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr='''127.0.01''', master_port=use_port, mixed_precision=mixed_precision):
                launcher = PrepareForLaunch(function, distributed_type='''MULTI_GPU''')
                print(F'''Launching training on {num_processes} GPUs.''')
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method='''fork''')
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            '''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
                            '''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
                            '''Please review your imports and test them when running the `notebook_launcher()` to identify '''
                            '''which one is problematic.''') from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ['''PYTORCH_ENABLE_MPS_FALLBACK'''] = '''1'''
                print('''Launching training on MPS.''')
            elif torch.cuda.is_available():
                print('''Launching training on one GPU.''')
            else:
                print('''Launching training on CPU.''')
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    """simple docstring"""
    from torch.multiprocessing import start_processes
    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes, master_addr='''127.0.01''', master_port='''29500''', accelerate_mixed_precision='''no''', accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu='''yes''', ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method='''fork''')
| 367 |
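# --- Editor's illustrative sketch (not part of the dataset rows) ---
# Simplified stand-in for the environment patching used by the launchers
# above (not accelerate's actual patch_environment): set env vars for the
# duration of a launch, then restore whatever was there before.
import os
from contextlib import contextmanager

@contextmanager
def patch_env(**kwargs):
    previous = {key: os.environ.get(key) for key in kwargs}
    os.environ.update({key: str(value) for key, value in kwargs.items()})
    try:
        yield
    finally:
        for key, value in previous.items():
            if value is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = value

with patch_env(ACCEL_DEMO_MASTER_PORT="29500"):
    assert os.environ["ACCEL_DEMO_MASTER_PORT"] == "29500"
assert "ACCEL_DEMO_MASTER_PORT" not in os.environ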
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
a_ : Tuple = TypeVar("""T""")
a_ : Dict = Union[List[T], Tuple[T, ...]]
a_ : int = Union[T, List[T], Dict[str, T]]
a_ : Optional[Any] = Union[str, bytes, os.PathLike]
| 6 | 0 |
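# --- Editor's illustrative sketch (not part of the dataset rows) ---
# How aliases like the nested-structure union above are typically consumed:
# a helper that accepts a single item, a list of items, or a dict of items.
from typing import Dict, List, Union

def flatten(data: Union[int, List[int], Dict[str, int]]) -> List[int]:
    if isinstance(data, dict):
        return list(data.values())
    if isinstance(data, list):
        return data
    return [data]

assert flatten(1) == [1]
assert flatten([1, 2]) == [1, 2]
assert flatten({"a": 3}) == [3]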
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    """simple docstring"""
    return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6'''))
def luhn_validation(credit_card_number: str) -> bool:
    """simple docstring"""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    """simple docstring"""
    error_message = F'''{credit_card_number} is an invalid credit card number because'''
    if not credit_card_number.isdigit():
        print(F'''{error_message} it has nonnumerical characters.''')
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(F'''{error_message} of its length.''')
        return False
    if not validate_initial_digits(credit_card_number):
        print(F'''{error_message} of its first two digits.''')
        return False
    if not luhn_validation(credit_card_number):
        print(F'''{error_message} it fails the Luhn check.''')
        return False
    print(F'''{credit_card_number} is a valid credit card number.''')
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 368 |
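# --- Editor's illustrative sketch (not part of the dataset rows) ---
# Compact restatement of the Luhn check above, runnable standalone:
# "4111111111111111" is the classic Visa test number and passes, while
# changing a single digit breaks the checksum.
def luhn_ok(number: str) -> bool:
    digits = [int(d) for d in number]
    for i in range(len(digits) - 2, -1, -2):
        digits[i] = sum(divmod(digits[i] * 2, 10))  # double and re-sum digits
    return sum(digits) % 10 == 0

assert luhn_ok("4111111111111111")
assert not luhn_ok("4111111111111112")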
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """simple docstring"""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations), scores)
        plt.xlabel('''Iterations''')
        plt.ylabel('''Function values''')
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_f1(x, y):
        """simple docstring"""
        return (x**2) + (y**2)
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F"""and 50 > y > - 5 found via simulated annealing: {local_min.score()}"""
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F"""and 50 > y > - 5 found via simulated annealing: {local_max.score()}"""
    )
    def test_f2(x, y):
        """simple docstring"""
        return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        """The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        F"""{local_min.score()}"""
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        """The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        F"""{local_max.score()}"""
    )
| 6 | 0 |
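# --- Editor's illustrative sketch (not part of the dataset rows) ---
# The acceptance rule above is Metropolis-style: a move that worsens the
# score by `change` is still taken with probability e^(change / temperature),
# so uphill moves become rare as the temperature decays.
import math

for temp in (100.0, 10.0, 1.0):
    accept_prob = math.e ** (-5.0 / temp)  # move that is 5 units worse
    print(f"T={temp:>5}: accept with p={accept_prob:.3f}")
# T=100.0 -> p=0.951, T= 10.0 -> p=0.607, T=  1.0 -> p=0.007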
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a_ : List[Any] = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 369 |
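# --- Editor's illustrative sketch (not part of the dataset rows) ---
# Minimal stand-in for the lazy-module trick above (not transformers'
# actual _LazyModule): a module-like object that defers the real import
# until an attribute is first accessed.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {module: [symbols]} into {symbol: module}
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, item):
        module = importlib.import_module(self._symbol_to_module[item])
        return getattr(module, item)

lazy = LazyModule("demo", {"json": ["dumps"]})
assert lazy.dumps({"a": 1}) == '{"a": 1}'  # json is imported only at this point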
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    """simple docstring"""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('''moe_layer.experts.0''', F'''ffn.experts.expert_{expert_idx}''')
            else:
                key = key.replace('''moe_layer.experts.''', '''ffn.experts.expert_''')
        if "gate" in key:
            key = key.replace('''.moe_layer.gate.wg''', '''.ffn.router.classifier''')
        if "fc2" in key and "experts" not in key:
            key = key.replace('''.fc2.''', '''.ffn.fc2.''')
        if "fc1" in key and "experts" not in key:
            key = key.replace('''.fc1.''', '''.ffn.fc1.''')
        if ".encoder_attn." in key:
            key = key.replace('''.encoder_attn.''', '''.cross_attention.''')
        if "encoder_attn_layer_norm" in key:
            key = key.replace('''encoder_attn_layer_norm''', '''cross_attention_layer_norm''')
        if "final_layer_norm" in key:
            key = key.replace('''final_layer_norm''', '''ff_layer_norm''')
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    """simple docstring"""
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + F'''-rank-{expert}.pt'''
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)['''model''']
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace('''.bin''', F'''-{len(sharded_state_dicts)+1:05d}-of-???.bin'''))
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype)
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace('''.bin''', F'''-{len(sharded_state_dicts)+1:05d}-of-???.bin'''))
    shared_weights = torch.load(switch_checkpoint_path + '''-shared.pt''')['''model''']
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights['''shared.weight'''] = shared_weights['''decoder.embed_tokens.weight''']
    sharded_state_dicts.append(shared_weights.keys())
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace('''.bin''', F'''-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin''')
        temp_filename = os.path.join(dump_path, weights_name.replace('''.bin''', F'''-{idx+1:05d}-of-???.bin'''))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'''total_size''': total_size}
    index = {'''metadata''': metadata, '''weight_map''': weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), '''w''', encoding='''utf-8''') as f:
        content = json.dumps(index, indent=2, sort_keys=True) + '''\n'''
        f.write(content)
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--nllb_moe_checkpoint_path""",
        default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
        type=str,
        required=False,
        help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
    )
    parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
        type=str,
        required=False,
        help="""Path to the output pytorch model.""",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )
    config = NllbMoeConfig.from_pretrained(
        """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("""Done""")
    model.save_pretrained(args.pytorch_dump_folder_path)
| 6 | 0 |
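# --- Editor's illustrative sketch (not part of the dataset rows) ---
# Consuming the sharded-checkpoint index written by shard_on_the_fly above:
# the index JSON maps every parameter name to the shard file holding it, so
# a loader can open only the shards it needs. File names here are made up.
import json

index = {
    "metadata": {"total_size": 123456},
    "weight_map": {
        "encoder.layers.0.fc1.weight": "pytorch_model-00001-of-00002.bin",
        "decoder.layers.0.fc1.weight": "pytorch_model-00002-of-00002.bin",
    },
}
blob = json.dumps(index, indent=2, sort_keys=True) + "\n"
parsed = json.loads(blob)
needed = {parsed["weight_map"][name] for name in ["decoder.layers.0.fc1.weight"]}
assert needed == {"pytorch_model-00002-of-00002.bin"}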
'''simple docstring'''
def max_product_subarray(numbers: list[int]) -> int:
    """simple docstring"""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers):
        raise ValueError('''numbers must be an iterable of integers''')
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
| 370 |
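# --- Editor's illustrative sketch (not part of the dataset rows) ---
# Worked examples for the max-product-subarray routine above, restated
# inline so this runs standalone: a negative factor can flip the running
# minimum into the new maximum, which is why both extremes are tracked.
def max_product_demo(nums: list[int]) -> int:
    best = hi = lo = nums[0]
    for n in nums[1:]:
        if n < 0:
            hi, lo = lo, hi
        hi = max(n, hi * n)
        lo = min(n, lo * n)
        best = max(best, hi)
    return best

assert max_product_demo([2, 3, -2, 4]) == 6    # subarray [2, 3]
assert max_product_demo([-2, 0, -1]) == 0
assert max_product_demo([-2, -3, 4]) == 24     # (-2) * (-3) * 4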
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv2ImageProcessor'
    tokenizer_class = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', lowerCAmelCase, )
lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase, lowerCAmelCase )
def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = True, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=lowerCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =[text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCamelCase_ =features['''words''']
lowerCamelCase_ =self.tokenizer(
text=text if text is not None else features['''words'''], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['''boxes'''], word_labels=lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, stride=lowerCAmelCase, pad_to_multiple_of=lowerCAmelCase, return_token_type_ids=lowerCAmelCase, return_attention_mask=lowerCAmelCase, return_overflowing_tokens=lowerCAmelCase, return_special_tokens_mask=lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, return_length=lowerCAmelCase, verbose=lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase, )
# add pixel values
lowerCamelCase_ =features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
lowerCamelCase_ =self.get_overflowing_images(lowerCAmelCase, encoded_inputs['''overflow_to_sample_mapping'''] )
lowerCamelCase_ =images
return encoded_inputs
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[]
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(lowerCAmelCase )} and {len(lowerCAmelCase )}''' )
return images_with_overflow
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, )
return self.image_processor_class
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, )
return self.image_processor
| 6 | 0 |
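# --- Editor's illustrative sketch (not part of the dataset rows) ---
# The overflow handling above in miniature: when a long document is split
# into several token windows, overflow_to_sample_mapping says which sample
# each window came from, and the matching page image is repeated for it.
images = ["page_a", "page_b"]               # stand-ins for page images
overflow_to_sample_mapping = [0, 0, 1]      # sample 0 produced two windows
images_with_overflow = [images[i] for i in overflow_to_sample_mapping]
assert images_with_overflow == ["page_a", "page_a", "page_b"]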
'''simple docstring'''
import datasets
a_ : Dict = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
a_ : Tuple = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
a_ : Union[str, Any] = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
"""
def simple_accuracy(preds, labels):
    """simple docstring"""
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Xnli(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ), codebase_urls=[], reference_urls=[], format='''numpy''', )
    def _compute(self, predictions, references):
        """simple docstring"""
        return {"accuracy": simple_accuracy(predictions, references)}
| 371 |
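# --- Editor's illustrative sketch (not part of the dataset rows) ---
# The XNLI score above is plain accuracy; with numpy arrays the whole
# computation is one elementwise comparison and a mean.
import numpy as np

preds = np.array([0, 1, 2, 1])
labels = np.array([0, 1, 1, 1])
assert (preds == labels).mean() == 0.75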
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = 'sample'
@property
def lowercase__ ( self, lowerCAmelCase=(32, 32) ):
"""simple docstring"""
lowerCamelCase_ =4
lowerCamelCase_ =3
lowerCamelCase_ =floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase )
return {"sample": image}
@property
def lowercase__ ( self ):
"""simple docstring"""
return (3, 32, 32)
@property
def lowercase__ ( self ):
"""simple docstring"""
return (3, 32, 32)
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ={
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 3,
}
lowerCamelCase_ =self.dummy_input
return init_dict, inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
    def test_from_pretrained_hub(self):
        """simple docstring"""
        model, loading_info = VQModel.from_pretrained('''fusing/vqgan-dummy''', output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['''missing_keys''']), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        """simple docstring"""
        model = VQModel.from_pretrained('''fusing/vqgan-dummy''')
        model.to(torch_device).eval()
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 6 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a_ : Any = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''') else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """simple docstring"""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith('''tiny'''):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith('''small'''):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith('''base'''):
        pass
    elif deit_name[4:].startswith('''large'''):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors='''pt''')
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--deit_name""",
        default="""vit_deit_base_distilled_patch16_224""",
        type=str,
        help="""Name of the DeiT timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 350 |
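# --- Editor's illustrative sketch (not part of the dataset rows) ---
# The fused-QKV split performed by read_in_q_k_v above, on dummy sizes:
# timm stores query/key/value as one (3*hidden, hidden) matrix that is
# sliced into three (hidden, hidden) blocks, in q/k/v order.
import torch

hidden = 8
qkv_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q = qkv_weight[:hidden, :]
k = qkv_weight[hidden : 2 * hidden, :]
v = qkv_weight[-hidden:, :]
assert q.shape == k.shape == v.shape == (hidden, hidden)
assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)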
'''simple docstring'''
import datasets
from .evaluate import evaluate
a_ : List[Any] = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
a_ : List[Any] = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
a_ : Any = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ), codebase_urls=['''https://www.atticusprojectai.org/cuad'''], reference_urls=['''https://www.atticusprojectai.org/cuad'''], )
    def _compute(self, predictions, references):
        """simple docstring"""
        pred_dict = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
        dataset = [
            {
                '''paragraphs''': [
                    {
                        '''qas''': [
                            {
                                '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
                                '''id''': ref['''id'''],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 6 | 0 |
'''simple docstring'''
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
a_ : str = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = ['''layers''', '''blocks''']
    for k in ignore_keys:
        state_dict.pop(k, None)
a_ : str = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def rename_keys(s_dict):
    """simple docstring"""
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(F'''{key} -> {new_key}''')
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str) -> bytes:
    """simple docstring"""
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split('''/''')[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(F'''{download_target} exists and is not a regular file''')
    if os.path.isfile(download_target):
        model_bytes = open(download_target, '''rb''').read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(F'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''')
    with urllib.request.urlopen(url) as source, open(download_target, '''wb''') as output:
        with tqdm(
            total=int(source.info().get('''Content-Length''')), ncols=80, unit='''iB''', unit_scale=True, unit_divisor=1024) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, '''rb''').read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''')
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """simple docstring"""
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location='''cpu''')
    dimensions = original_checkpoint['''dims''']
    state_dict = original_checkpoint['''model_state_dict''']
    proj_out_weights = state_dict['''decoder.token_embedding.weight''']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions['''n_vocab'''], encoder_ffn_dim=ffn_dim, decoder_ffn_dim=ffn_dim, num_mel_bins=dimensions['''n_mels'''], d_model=dimensions['''n_audio_state'''], max_target_positions=dimensions['''n_text_ctx'''], encoder_layers=dimensions['''n_audio_layer'''], encoder_attention_heads=dimensions['''n_audio_head'''], decoder_layers=dimensions['''n_text_layer'''], decoder_attention_heads=dimensions['''n_text_head'''], max_source_positions=dimensions['''n_audio_ctx'''], )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            F''' but all the following weights are missing {missing}''')
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("""--checkpoint_path""", type=str, help="""Path to the downloaded checkpoints""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 351 |
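# --- Editor's illustrative sketch (not part of the dataset rows) ---
# The checksum step inside _download above, isolated: hash the downloaded
# bytes and compare against the SHA-256 digest carried in the URL path.
import hashlib

payload = b"model bytes"
expected_sha256 = hashlib.sha256(payload).hexdigest()

def verify(blob: bytes, digest: str) -> bytes:
    if hashlib.sha256(blob).hexdigest() != digest:
        raise RuntimeError("SHA256 checksum does not match; retry the download.")
    return blob

assert verify(payload, expected_sha256) == payload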
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """facebook/dpr-ctx_encoder-single-nq-base""": 512,
    """facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """facebook/dpr-question_encoder-single-nq-base""": 512,
    """facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """facebook/dpr-reader-single-nq-base""": 512,
    """facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
DPRReaderOutput = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
CUSTOM_DPR_READER_DOCSTRING = R"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class __UpperCamelCase :
def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
elif titles is None or texts is None:
lowerCamelCase_ =titles if texts is None else texts
return super().__call__(
lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
lowerCamelCase_ =titles if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [titles]
lowerCamelCase_ =texts if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [texts]
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =questions if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [questions] * n_passages
        assert len(lowerCAmelCase ) == len(
            lowerCAmelCase ), f'''There should be as many titles as texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.'''
lowerCamelCase_ =super().__call__(lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
lowerCamelCase_ =super().__call__(lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
lowerCamelCase_ ={
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCAmelCase, lowerCAmelCase )
]
}
if return_attention_mask is not False:
lowerCamelCase_ =[]
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCamelCase_ =attention_mask
return self.pad(lowerCAmelCase, padding=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 16, lowerCAmelCase = 64, lowerCAmelCase = 4, ):
"""simple docstring"""
lowerCamelCase_ =reader_input['''input_ids''']
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =reader_output[:3]
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =sorted(range(lowerCAmelCase ), reverse=lowerCAmelCase, key=relevance_logits.__getitem__ )
lowerCamelCase_ =[]
for doc_id in sorted_docs:
lowerCamelCase_ =list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCamelCase_ =sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCamelCase_ =sequence_ids.index(self.pad_token_id )
else:
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCAmelCase, top_spans=lowerCAmelCase, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCAmelCase, start_index=lowerCAmelCase, end_index=lowerCAmelCase, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) )
if len(lowerCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =[]
for start_index, start_score in enumerate(lowerCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowerCamelCase_ =sorted(lowerCAmelCase, key=lambda lowerCAmelCase : x[1], reverse=lowerCAmelCase )
lowerCamelCase_ =[]
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
lowerCamelCase_ =end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
lowercase : int =VOCAB_FILES_NAMES
lowercase : Tuple =READER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Tuple =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] =READER_PRETRAINED_INIT_CONFIGURATION
lowercase : int =['input_ids', 'attention_mask']
lowercase : Dict =DPRReaderTokenizer
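# Hedged usage sketch (checkpoint name taken from the maps above; class and method
# names are obfuscated in this dump, so this assumes the usual DPRReaderTokenizerFast API):
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       return_tensors="pt",
#   )
#   # given model outputs (start_logits, end_logits, relevance_logits), the best answer
#   # spans would then be recovered with tokenizer.decode_best_spans(encoded, outputs)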
| 6 | 0 |
'''simple docstring'''
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class __UpperCamelCase :
    def __init__( self, config_file_or_dict ):
        """simple docstring"""
        if isinstance(config_file_or_dict, dict ):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict )
        elif os.path.exists(config_file_or_dict ):
            with io.open(config_file_or_dict, '''r''', encoding='''utf-8''' ) as f:
                config = json.load(f )
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict ).decode('''utf-8''' )
                config = json.loads(config_decoded )
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
        self.config = config
self.set_stage_and_offload()
    def set_stage_and_offload( self ):
        """simple docstring"""
        self._stage = self.get_value('''zero_optimization.stage''', -1 )
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(['''cpu''', '''nvme'''] )
            offload_devices = set(
                [
                    self.get_value('''zero_optimization.offload_optimizer.device''' ),
                    self.get_value('''zero_optimization.offload_param.device''' ),
                ] )
            if len(offload_devices & offload_devices_valid ) > 0:
                self._offload = True
    def find_config_node( self, ds_key_long ):
        """simple docstring"""
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split('''.''' )
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node )
            if config is None:
                return None, ds_key
        return config, ds_key
    def get_value( self, ds_key_long, default=None ):
        """simple docstring"""
        config, ds_key = self.find_config_node(ds_key_long )
        if config is None:
            return default
        return config.get(ds_key, default )
    def del_config_sub_tree( self, ds_key_long, must_exist=False ):
        """simple docstring"""
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split('''.''' )
        for node in nodes:
            parent_config = config
            config = config.get(node )
            if config is None:
                if must_exist:
                    raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node )
    def is_true( self, ds_key_long ):
        """simple docstring"""
        value = self.get_value(ds_key_long )
        return False if value is None else bool(value )
    def is_false( self, ds_key_long ):
        """simple docstring"""
        value = self.get_value(ds_key_long )
        return False if value is None else not bool(value )
    def is_zero2( self ):
        """simple docstring"""
        return self._stage == 2
    def is_zero3( self ):
        """simple docstring"""
        return self._stage == 3
    def is_offload( self ):
        """simple docstring"""
        return self._offload
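# Hedged usage sketch (config values invented for illustration; the class name above is
# obfuscated in this dump, upstream it is the DeepSpeed config wrapper): dotted keys
# address nested config entries.
#
#   ds_config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"},
#                                      "offload_optimizer": {"device": "cpu"}}}
#   plugin = <config wrapper class above>(ds_config)
#   plugin.get_value("zero_optimization.stage")  # -> 3
#   plugin.is_zero3()                            # -> True
#   plugin.is_offload()                          # -> True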
class __UpperCamelCase :
    def __init__( self, engine ):
        """simple docstring"""
        self.engine = engine
    def backward( self, loss, **kwargs ):
        """simple docstring"""
        self.engine.backward(loss, **kwargs )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class __UpperCamelCase ( AcceleratedOptimizer ):
    def __init__( self, optimizer ):
        """simple docstring"""
        super().__init__(optimizer, device_placement=False, scaler=None )
        self.__has_overflow__ = hasattr(self.optimizer, '''overflow''' )
    def zero_grad( self, set_to_none=None ):
        """simple docstring"""
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
    def step( self ):
        """simple docstring"""
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
    @property
    def overflow( self ):
        """simple docstring"""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class __UpperCamelCase ( AcceleratedScheduler ):
    def __init__( self, scheduler, optimizers ):
        """simple docstring"""
        super().__init__(scheduler, optimizers )
    def step( self ):
        """simple docstring"""
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class __UpperCamelCase :
    def __init__( self, params, lr=0.0_0_1, weight_decay=0, **kwargs ):
        """simple docstring"""
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs
class __UpperCamelCase :
    def __init__( self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs ):
        """simple docstring"""
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
| 352 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    """simple docstring"""
    data_dict = {
        '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
        '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
        '''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
    }
    dataset = Dataset.from_dict(data_dict )
    return dataset
class __UpperCamelCase ( TestCase ):
    def test_make_duplicate_clusters( self ):
        """simple docstring"""
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.8_5 )
        self.assertEqual(len(duplicate_clusters[0] ), 2 )
    def test_deduplicate_dataset( self ):
        """simple docstring"""
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_filter ), 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]['''copies'''], 2 )
        self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''], True )
| 6 | 0 |
'''simple docstring'''
INSTALL_CONTENT = """\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the latest release, comment out the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"""
notebook_first_cells = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
black_avoid_patterns = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_trocr"""] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
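# Illustrative note (hypothetical usage): with the _LazyModule registered above,
# `from transformers import TrOCRProcessor` stays cheap, and torch-backed symbols such
# as TrOCRForCausalLM are only materialized on first attribute access (PEP 562
# module-level __getattr__).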
| 6 | 0 |
'''simple docstring'''
from typing import Any
def viterbi( observations_space: list, states_space: list, initial_probabilities: dict, transition_probabilities: dict, emission_probabilities: dict, ) -> list:
    """simple docstring"""
    _validation(
        observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1, -1, -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation( observations_space: Any, states_space: Any, initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any, ) -> None:
    """simple docstring"""
    _validate_not_empty(
        observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, )
    _validate_lists(observations_space, states_space )
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities )
def _validate_not_empty( observations_space: Any, states_space: Any, initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any, ) -> None:
    """simple docstring"""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('''There\'s an empty parameter''' )
def _validate_lists( observations_space: Any, states_space: Any ) -> None:
    """simple docstring"""
    _validate_list(observations_space, '''observations_space''' )
    _validate_list(states_space, '''states_space''' )
def _validate_list( _object: Any, var_name: str ) -> None:
    """simple docstring"""
    if not isinstance(_object, list ):
        msg = f'''{var_name} must be a list'''
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x, str ):
                msg = f'''{var_name} must be a list of strings'''
                raise ValueError(msg )
def _validate_dicts( initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any, ) -> None:
    """simple docstring"""
    _validate_dict(initial_probabilities, '''initial_probabilities''', float )
    _validate_nested_dict(transition_probabilities, '''transition_probabilities''' )
    _validate_nested_dict(emission_probabilities, '''emission_probabilities''' )
def _validate_nested_dict( _object: Any, var_name: str ) -> None:
    """simple docstring"""
    _validate_dict(_object, var_name, dict )
    for x in _object.values():
        _validate_dict(x, var_name, float, True )
def _validate_dict( _object: Any, var_name: str, value_type: type, nested: bool = False ) -> None:
    """simple docstring"""
    if not isinstance(_object, dict ):
        msg = f'''{var_name} must be a dict'''
        raise ValueError(msg )
    if not all(isinstance(x, str ) for x in _object ):
        msg = f'''{var_name} all keys must be strings'''
        raise ValueError(msg )
    if not all(isinstance(x, value_type ) for x in _object.values() ):
        nested_text = """nested dictionary """ if nested else """"""
        msg = f'''{var_name} {nested_text}all values must be {value_type.__name__}'''
        raise ValueError(msg )
if __name__ == "__main__":
from doctest import testmod
testmod()
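    # Hedged worked example (probabilities invented for illustration; this is the
    # classic Healthy/Fever HMM). The most likely hidden sequence for these
    # observations is ["Healthy", "Healthy", "Fever"].
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))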
| 354 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 150_0000 ) -> int:
    """simple docstring"""
    frequencies = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2 ):
            if gcd(euclid_m, euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
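# For intuition (example values from the Project Euler 75 statement): perimeter 12
# admits exactly one right triangle, (3, 4, 5), while perimeter 120 admits three,
# (30, 40, 50), (20, 48, 52) and (24, 45, 51), so 12 counts toward the answer and
# 120 does not.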
if __name__ == "__main__":
print(F"""{solution() = }""")
| 6 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __UpperCamelCase ( ABC ):
@staticmethod
@abstractmethod
def lowercase__ ( lowerCAmelCase ):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def lowercase__ ( self ):
"""simple docstring"""
raise NotImplementedError()
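# Hypothetical sketch of the contract a concrete subclass fulfils (names invented for
# illustration; the abstract method names above are obfuscated in this dump, upstream
# they are `register_subcommand` and `run`):
#
#   class EnvCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           env_parser = parser.add_parser("env")
#           env_parser.set_defaults(func=lambda args: EnvCommand())
#       def run(self):
#           print("collect environment info here")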
| 355 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ : Tuple = 16
a_ : Optional[int] = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 ):
"""simple docstring"""
lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowerCamelCase_ =load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__snake_case : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ =datasets.map(
__snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ =tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__snake_case : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ =16
elif accelerator.mixed_precision != "no":
lowerCamelCase_ =8
else:
lowerCamelCase_ =None
return tokenizer.pad(
__snake_case , padding='''longest''' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowerCamelCase_ =DataLoader(
tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
lowerCamelCase_ =DataLoader(
tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a_ : Tuple = mocked_dataloaders # noqa: F811
def training_function(config , args ):
"""simple docstring"""
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __snake_case ) == "1":
lowerCamelCase_ =2
# Initialize accelerator
lowerCamelCase_ =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ =config['''lr''']
lowerCamelCase_ =int(config['''num_epochs'''] )
lowerCamelCase_ =int(config['''seed'''] )
lowerCamelCase_ =int(config['''batch_size'''] )
lowerCamelCase_ =evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__snake_case )
def inner_training_loop(__snake_case : Union[str, Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ =model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ =AdamW(params=model.parameters() , lr=__snake_case )
lowerCamelCase_, lowerCamelCase_ =get_dataloaders(__snake_case , __snake_case )
# Instantiate scheduler
lowerCamelCase_ =get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Now we train the model
for epoch in range(__snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_ =model(**__snake_case )
lowerCamelCase_ =outputs.loss
accelerator.backward(__snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ =model(**__snake_case )
lowerCamelCase_ =outputs.logits.argmax(dim=-1 )
lowerCamelCase_, lowerCamelCase_ =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
lowerCamelCase_ =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __snake_case )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
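# Minimal standalone sketch of the idea behind `find_executable_batch_size`
# (illustrative only, not Accelerate's actual implementation): keep halving the
# batch size and re-running the wrapped training loop until it fits in memory.
def _find_executable_batch_size_sketch(function, starting_batch_size=128):
    def wrapper():
        batch_size = starting_batch_size
        while batch_size > 0:
            try:
                # The wrapped function receives the batch size as its only argument.
                return function(batch_size)
            except RuntimeError as error:  # assumption: OOM surfaces as RuntimeError
                if "out of memory" in str(error):
                    batch_size //= 2
                else:
                    raise
        raise RuntimeError("No executable batch size found, reached zero.")
    return wrapper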
| 6 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
a_ : int = logging.get_logger(__name__)
class __UpperCamelCase ( __lowerCamelCase ):
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
warnings.warn(
'''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ImageGPTImageProcessor instead.''', UpperCamelCase_, )
super().__init__(*UpperCamelCase_, **UpperCamelCase_ )
| 356 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = """src/diffusers"""
# Matches is_xxx_available()
_re_backend = re.compile(R"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
_re_single_line_import = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
DUMMY_CONSTANT = """
{0} = None
"""
DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def find_backend(line ):
    """simple docstring"""
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
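# e.g. find_backend("    if not (is_torch_available() and is_flax_available()):")
# returns "torch_and_flax", while a line with no is_xxx_available() call returns None.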
def read_init():
    """simple docstring"""
    with open(os.path.join(PATH_TO_DIFFUSERS , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith('''else:''' ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name , backend_name ):
    """simple docstring"""
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
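# For illustration, create_dummy_object("UNet2DModel", '["torch"]') fills the
# DUMMY_CLASS template above, producing roughly:
#
#   class UNet2DModel(metaclass=DummyObject):
#       _backends = ["torch"]
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])
#       ...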
def create_dummy_files(backend_specific_objects=None ):
    """simple docstring"""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = '''[''' + ''', '''.join(F'''"{b}"''' for b in backend.split('''_and_''' ) ) + ''']'''
        dummy_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies(overwrite=False ):
    """simple docstring"""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'''torch''': '''pt'''}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , '''utils''' )
    dummy_file_paths = {
        backend: os.path.join(path , F'''dummy_{short_names.get(backend , backend )}_objects.py''' )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''''''
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F'''Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main '''
                    '''__init__ has new objects.''' )
                with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    '''The main __init__ has objects that are not present in '''
                    F'''diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` '''
                    '''to fix this.''' )
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
a_ : Tuple = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_informer"""] = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 357 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def next_number(number: int ) -> int:
    """simple docstring"""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_00_00_00
CHAINS[0] = True
CHAINS[57] = False
def chain(number: int ) -> bool:
    """simple docstring"""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 1000_0000 ) -> int:
    """simple docstring"""
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
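# Worked chains from the Project Euler 92 statement: 44 -> 32 -> 13 -> 10 -> 1 -> 1
# and 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89, so every starting
# number eventually either sticks at 1 or falls into the 89 loop.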
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 6 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ : Tuple = logging.get_logger(__name__)
a_ : List[Any] = {"vocab_file": "spm_char.model"}
a_ : int = {
"vocab_file": {
"microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
"microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
"microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
}
}
a_ : Any = {
"microsoft/speecht5_asr": 10_24,
"microsoft/speecht5_tts": 10_24,
"microsoft/speecht5_vc": 10_24,
}
class __UpperCamelCase ( SCREAMING_SNAKE_CASE__ ):
lowercase : Dict =VOCAB_FILES_NAMES
lowercase : str =PRETRAINED_VOCAB_FILES_MAP
lowercase : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Tuple =['input_ids', 'attention_mask']
def __init__( self, lowerCAmelCase, lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="<unk>", lowerCAmelCase="<pad>", lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a_, eos_token=a_, unk_token=a_, pad_token=a_, sp_model_kwargs=self.sp_model_kwargs, **a_, )
lowerCamelCase_ =vocab_file
lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a_ )
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ={self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowerCamelCase_ =self.__dict__.copy()
lowerCamelCase_ =None
return state
def __setstate__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowerCamelCase_ ={}
lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.encode(a_, out_type=a_ )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.piece_to_id(a_ )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.sp_model.IdToPiece(a_ )
return token
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(a_ ) + token
lowerCamelCase_ =[]
else:
current_sub_tokens.append(a_ )
out_string += self.sp_model.decode(a_ )
return out_string.strip()
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_, token_ids_a=a_, already_has_special_tokens=a_ )
lowerCamelCase_ =[1]
if token_ids_a is None:
return ([0] * len(a_ )) + suffix_ones
return ([0] * len(a_ )) + ([0] * len(a_ )) + suffix_ones
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(a_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ =os.path.join(
a_, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_, '''wb''' ) as fi:
lowerCamelCase_ =self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
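# Hedged usage sketch (the class above is obfuscated in this dump; upstream it is
# SpeechT5Tokenizer, and the checkpoint name comes from the map above):
#
#   tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
#   ids = tok("Hello world").input_ids   # character-level pieces plus a trailing </s>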
| 358 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args ):
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand( BaseTransformersCLICommand ):
@staticmethod
def lowercase__ ( lowerCAmelCase ):
"""simple docstring"""
        download_parser = parser.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''', type=lowerCAmelCase, default=lowerCAmelCase, help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''', action='''store_true''', help='''Force the model to be download even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''', action='''store_true''', help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''', )
download_parser.add_argument('''model''', type=lowerCAmelCase, help='''Name of the model to download''' )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self, model, cache, force, trust_remote_code ):
        """simple docstring"""
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
def lowercase__ ( self ):
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
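# Illustrative CLI invocation wired up by register_subcommand above (flags as defined
# there; the model name is an arbitrary example):
#
#   transformers-cli download bert-base-uncased --cache-dir ./models --force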
| 6 | 0 |
'''simple docstring'''
def simplify( current_set: list[list] ) -> list[list]:
"""simple docstring"""
lowerCamelCase_ =current_set.copy()
for row_index, row in enumerate(lowerCAmelCase__ ):
lowerCamelCase_ =row[0]
for column_index, column in enumerate(lowerCAmelCase__ ):
if magnitude == 0:
lowerCamelCase_ =column
continue
lowerCamelCase_ =column / magnitude
# Subtract to cancel term
lowerCamelCase_ =current_set[0]
lowerCamelCase_ =[first_row]
lowerCamelCase_ =current_set[1::]
for row in current_set:
lowerCamelCase_ =[]
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(lowerCAmelCase__ )
continue
for column_index in range(len(lowerCAmelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCAmelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
lowerCamelCase_ =final_set[0]
lowerCamelCase_ =[]
lowerCamelCase_ =[]
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
lowerCamelCase_ =simplify(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , lowerCAmelCase__ )
lowerCamelCase_ =resultant
return final_set
def solve_simultaneous( equations: list[list] ) -> list:
"""simple docstring"""
if len(lowerCAmelCase__ ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
lowerCamelCase_ =len(lowerCAmelCase__ ) + 1
if any(len(lowerCAmelCase__ ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(lowerCAmelCase__ , (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(lowerCAmelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
lowerCamelCase_ =equations.copy()
if any(0 in row for row in data_set ):
lowerCamelCase_ =data_set.copy()
lowerCamelCase_ =[]
for row_index, row in enumerate(lowerCAmelCase__ ):
if 0 not in row:
lowerCamelCase_ =data_set.pop(lowerCAmelCase__ )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0 , lowerCAmelCase__ )
lowerCamelCase_ =data_set.copy()
lowerCamelCase_ =simplify(lowerCAmelCase__ )
lowerCamelCase_ =simplified[::-1]
lowerCamelCase_ =[]
for row in simplified:
lowerCamelCase_ =row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
lowerCamelCase_ =row.copy()[: len(lowerCAmelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCAmelCase__ ) == 0:
solutions.append(0 )
continue
lowerCamelCase_ =temp_row[1::]
lowerCamelCase_ =temp_row[::-1]
for column_index, column in enumerate(lowerCAmelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCAmelCase__ )
lowerCamelCase_ =[]
for item in solutions:
final.append(float(round(lowerCAmelCase__ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ : List[str] = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
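    # Expected results for the demos above, derived by hand (assuming solutions are
    # returned in variable order): the 5x5 system solves to [-1.0, 0.0, 1.0, 2.0, 3.0]
    # and the single equation 4x = 2 gives [0.5].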
| 359 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    lowercase : str =field(
        default=None , metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES )} )
    lowercase : str =field(
        default=None , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowercase : int =field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase : int =field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowercase : int =field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowercase : int =field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
    lowercase : bool =field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    lowercase : bool =field(
        default=False , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowercase : float =field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowercase : int =field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowercase : int =field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowercase : int =field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class Split( Enum ):
    train ='train'
    dev ='dev'
class __UpperCamelCase ( Dataset ):
lowercase : SquadDataTrainingArguments
lowercase : List[SquadFeatures]
lowercase : Split
lowercase : bool
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = Split.train, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = "pt", ):
"""simple docstring"""
lowerCamelCase_ =args
lowerCamelCase_ =is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(lowerCAmelCase, lowerCAmelCase ):
try:
lowerCamelCase_ =Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
lowerCamelCase_ =mode
# Load data features from cache or dataset file
lowerCamelCase_ ='''v2''' if args.version_2_with_negative else '''v1'''
lowerCamelCase_ =os.path.join(
cache_dir if cache_dir is not None else args.data_dir, f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ =cached_features_file + '''.lock'''
with FileLock(lowerCAmelCase ):
if os.path.exists(lowerCAmelCase ) and not args.overwrite_cache:
lowerCamelCase_ =time.time()
lowerCamelCase_ =torch.load(lowerCAmelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ =self.old_features['''features''']
lowerCamelCase_ =self.old_features.get('''dataset''', lowerCAmelCase )
lowerCamelCase_ =self.old_features.get('''examples''', lowerCAmelCase )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
''' future run''' )
else:
if mode == Split.dev:
lowerCamelCase_ =self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ =self.processor.get_train_examples(args.data_dir )
lowerCamelCase_, lowerCamelCase_ =squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowerCAmelCase, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowerCAmelCase, )
lowerCamelCase_ =time.time()
torch.save(
{'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples}, lowerCAmelCase, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.features[i]
lowerCamelCase_ =torch.tensor(feature.input_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.attention_mask, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.token_type_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.cls_index, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.p_mask, dtype=torch.float )
lowerCamelCase_ =torch.tensor(feature.is_impossible, dtype=torch.float )
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''attention_mask''': attention_mask,
'''token_type_ids''': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'''is_impossible''': is_impossible} )
if self.is_language_sensitive:
            inputs.update({'''langs''': (torch.ones(input_ids.shape, dtype=torch.int64 ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ =torch.tensor(feature.start_position, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
return inputs
| 6 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __UpperCamelCase ( PipelineTesterMixin , unittest.TestCase ):
    lowercase : Dict =KandinskyV22Pipeline
lowercase : List[Any] =[
'image_embeds',
'negative_image_embeds',
]
lowercase : Tuple =['image_embeds', 'negative_image_embeds']
lowercase : Dict =[
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowercase : Tuple =False
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase__ ( self ):
"""simple docstring"""
return 100
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ ={
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        lowerCamelCase_ =UNet2DConditionModel(**_snake_case )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ =VQModel(**self.dummy_movq_kwargs )
return model
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.dummy_unet
lowerCamelCase_ =self.dummy_movq
lowerCamelCase_ =DDIMScheduler(
num_train_timesteps=1_000, beta_schedule='''linear''', beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, clip_sample=_snake_case, set_alpha_to_one=_snake_case, steps_offset=1, prediction_type='''epsilon''', thresholding=_snake_case, )
lowerCamelCase_ ={
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ):
"""simple docstring"""
lowerCamelCase_ =floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(_snake_case ) ).to(_snake_case )
lowerCamelCase_ =floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
_snake_case )
if str(_snake_case ).startswith('''mps''' ):
lowerCamelCase_ =torch.manual_seed(_snake_case )
else:
lowerCamelCase_ =torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowerCamelCase_ ={
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
    def test_kandinsky( self ):
        """simple docstring"""
        device ='''cpu'''
        components =self.get_dummy_components()
        pipe =self.pipeline_class(**components )
        pipe =pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output =pipe(**self.get_dummy_inputs(device ) )
        image =output.images
        image_from_tuple =pipe(
            **self.get_dummy_inputs(device ), return_dict=False, )[0]
        image_slice =image[0, -3:, -3:, -1]
        image_from_tuple_slice =image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice =np.array(
[0.6_2_3_7_9_7_6, 1.0, 0.3_6_4_4_1_3_3_2, 1.0, 0.7_0_6_3_9_6_3_4, 0.2_9_8_7_7_1_8_6, 0.8_5_6_5_2_1_2_5, 0.5_2_1_6_8_4_3, 0.5_4_4_5_4_0_4_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyVaaPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_text2img( self ):
"""simple docstring"""
        expected_image =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' )
        pipe_prior =KandinskyVaaPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )  # torch_device is expected to come from the testing utilities imported above
        pipeline =KandinskyVaaPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder''', torch_dtype=torch.float16 )
        pipeline =pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        prompt ='''red cat, 4k photo'''
        generator =torch.Generator(device='''cuda''' ).manual_seed(0 )
        image_emb, zero_image_emb =pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='''''', ).to_tuple()
        generator =torch.Generator(device='''cuda''' ).manual_seed(0 )
        output =pipeline(
            image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, output_type='''np''', )
        image =output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image )
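# NOTE (added example): a condensed, hedged sketch of the two-stage API the tests
# above exercise; model ids mirror the slow test, while fp16/CUDA are assumptions.
# prior = KandinskyVaaPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16).to("cuda")
# decoder = KandinskyVaaPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16).to("cuda")
# image_emb, zero_emb = prior("red cat, 4k photo").to_tuple()
# image = decoder(image_embeds=image_emb, negative_image_embeds=zero_emb, output_type="np").images[0]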
| 360 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = """https://openaipublic.azureedge.net/jukebox/models/"""
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key( key: str ) -> str:
    """simple docstring"""
    if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
        key =key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
    elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
        key =key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
    elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
        key =key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
    elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
        key =key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
    if "conditioner_blocks.0." in key:
        key =key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
    if "prime_prior" in key:
        key =key.replace('''prime_prior''' , '''encoder''' )
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key =key.replace('''.emb.''' , '''.''' )
    if key.endswith('''k''' ):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('''.k''' , '''.codebook''' )
    if "y_emb." in key:
        return key.replace('''y_emb.''' , '''metadata_embedding.''' )
    if "x_emb.emb." in key:
        key =key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
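# NOTE (added example): doctest-style illustration; both keys are hypothetical
# but follow patterns handled by the branches above.
# >>> replace_key("prior.x_out.weight")
# 'prior.fc_proj_out.weight'
# >>> replace_key("priors.0.y_emb.weight")
# 'priors.0.metadata_embedding.weight'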
def fix_jukebox_keys( state_dict , model_state_dict , key_prefix , mapping ):
    """simple docstring"""
    new_dict ={}
    import re
    re_encoder_block_conv_in =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
    re_encoder_block_resnet =re.compile(
        r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_encoder_block_proj_out =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
    re_decoder_block_conv_out =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
    re_decoder_block_resnet =re.compile(
        r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_decoder_block_proj_in =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
    re_prior_cond_conv_out =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
    re_prior_cond_resnet =re.compile(
        r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_prior_cond_proj_in =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match =re_encoder_block_conv_in.match(original_key )
            groups =regex_match.groups()
            block_index =int(groups[2] ) * 2 + int(groups[3] )
            re_new_key =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
            key =re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match =re_encoder_block_resnet.match(original_key )
            groups =regex_match.groups()
            block_index =int(groups[2] ) * 2 + int(groups[3] )
            conv_index ={'''1''': 1, '''3''': 2}[groups[-2]]
            prefix =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
            resnet_block =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key =prefix + resnet_block
            key =re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match =re_encoder_block_proj_out.match(original_key )
            groups =regex_match.groups()
            re_new_key =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
            key =re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match =re_decoder_block_conv_out.match(original_key )
            groups =regex_match.groups()
            block_index =int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
            key =re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match =re_decoder_block_resnet.match(original_key )
            groups =regex_match.groups()
            block_index =int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index ={'''1''': 1, '''3''': 2}[groups[-2]]
            prefix =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
            resnet_block =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key =prefix + resnet_block
            key =re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match =re_decoder_block_proj_in.match(original_key )
            groups =regex_match.groups()
            re_new_key =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
            key =re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match =re_prior_cond_conv_out.match(original_key )
            groups =regex_match.groups()
            block_index =int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
            key =re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match =re_prior_cond_resnet.match(original_key )
            groups =regex_match.groups()
            block_index =int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index ={'''1''': 1, '''3''': 2}[groups[-2]]
            prefix =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
            resnet_block =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key =prefix + resnet_block
            key =re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match =re_prior_cond_proj_in.match(original_key )
            groups =regex_match.groups()
            re_new_key =F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
            key =re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
        else:
            key =original_key
        key =replace_key(key )
        if F'''{key_prefix}.{key}''' not in model_state_dict or key is None:
            print(F'''failed converting {original_key} to {key}, does not match''' )
        # handle mismatched shape
        elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape:
            val =model_state_dict[F'''{key_prefix}.{key}''']
            print(F'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' )
            key =original_key
        mapping[key] =original_key
        new_dict[key] =value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint( model_name=None , pytorch_dump_folder_path=None ):
    """simple docstring"""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ):
            r =requests.get(F'''{PREFIX}{file}''' , allow_redirects=True )
            os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=True )
            open(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , '''wb''' ).write(r.content )
    model_to_convert =MODEL_MAPPING[model_name.split('''/''' )[-1]]
    config =JukeboxConfig.from_pretrained(model_name )
    model =JukeboxModel(config )
    weight_dict =[]
    mapping ={}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic =torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )['''model''']
        new_dic ={}
        for k in old_dic.keys():
            if k.endswith('''.b''' ):
                new_dic[k.replace('''b''' , '''bias''' )] =old_dic[k]
            elif k.endswith('''.w''' ):
                new_dic[k.replace('''w''' , '''weight''' )] =old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('''.blocks.''' , '''.model.''' )] =old_dic[k]
            else:
                new_dic[k] =old_dic[k]
        key_prefix ='''vqvae''' if i == 0 else F'''priors.{3 - i}'''
        new_dic =fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping )
        weight_dict.append(new_dic )
    vqvae_state_dict =weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(F'''{pytorch_dump_folder_path}/mapping.json''' , '''w''' ) as txtfile:
        json.dump(mapping , txtfile )
    print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 6 | 0 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class =XGLMTokenizer
    rust_tokenizer_class =XGLMTokenizerFast
    test_rust_tokenizer =True
    test_sentencepiece =True
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer =XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        """simple docstring"""
        token ='''<pad>'''
        token_id =1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ), token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ), token )
    def test_get_vocab( self ):
        """simple docstring"""
        vocab_keys =list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0], '''<s>''' )
        self.assertEqual(vocab_keys[1], '''<pad>''' )
        self.assertEqual(len(vocab_keys ), 1_008 )
    def test_vocab_size( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size, 1_008 )
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer =XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True )
        tokens =tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
], )
        ids =tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
], )
        back_tokens =tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
], )
@cached_property
    def big_tokenizer( self ):
"""simple docstring"""
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
    def test_picklable_without_disk( self ):
        """simple docstring"""
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name )
            tokenizer =XGLMTokenizer(f.name, keep_accents=True )
            pickled_tokenizer =pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers( self ):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer =self.get_tokenizer()
        rust_tokenizer =self.get_rust_tokenizer()
        sequence ='''I was born in 92000, and this is falsé.'''
        tokens =tokenizer.tokenize(sequence )
        rust_tokens =rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens, rust_tokens )
        ids =tokenizer.encode(sequence, add_special_tokens=False )
        rust_ids =rust_tokenizer.encode(sequence, add_special_tokens=False )
        self.assertListEqual(ids, rust_ids )
        rust_tokenizer =self.get_rust_tokenizer()
        ids =tokenizer.encode(sequence )
        rust_ids =rust_tokenizer.encode(sequence )
        self.assertListEqual(ids, rust_ids )
@slow
    def test_tokenization_base_easy_symbols( self ):
        """simple docstring"""
        symbols ='''Hello World!'''
        original_tokenizer_encodings =[2, 31_227, 4_447, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self ):
"""simple docstring"""
        symbols =(
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
        original_tokenizer_encodings =[2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenizer_integration( self ):
"""simple docstring"""
        # fmt: off
        expected_encoding ={
'''input_ids''': [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='''facebook/xglm-564M''', padding=False, )
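# NOTE (added example): quick sanity check mirroring `test_tokenization_base_easy_symbols`;
# requires network access to download the pretrained tokenizer.
# tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
# assert tok.encode("Hello World!") == [2, 31_227, 4_447, 35]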
| 361 |
'''simple docstring'''
def solution( n: int = 1000 ) -> int:
    """simple docstring"""
    f1, f2 =1, 1
    index =2
    while True:
        i =0
        f =f1 + f2
        f1, f2 =f2, f
        index += 1
        for _ in str(f ):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
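# NOTE (added example): the first Fibonacci number with 3 digits is F(12) = 144,
# so solution(3) == 12; the default n=1000 yields the Project Euler 25 answer.
# assert solution(3) == 12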
| 6 | 0 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card( model_card_dir , src_lang , tgt_lang , model_name ):
    """simple docstring"""
    texts ={
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores ={
'''wmt16-en-de-dist-12-1''': [2_8.3, 2_7.5_2],
'''wmt16-en-de-dist-6-1''': [2_7.4, 2_7.1_1],
'''wmt16-en-de-12-1''': [2_6.9, 2_5.7_5],
}
    pair =F'''{src_lang}-{tgt_lang}'''
    readme =F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True , exist_ok=True )
    path =os.path.join(model_card_dir , '''README.md''' )
    print(F'''Generating {path}''' )
    with open(path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / """model_cards"""
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / """allenai""" / model_name
write_model_card(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name)
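# NOTE (added example): calling the helper directly for a single card looks like
# this; the target directory is illustrative.
# write_model_card(Path("./model_cards/allenai/wmt16-en-de-12-1"), src_lang="en", tgt_lang="de", model_name="wmt16-en-de-12-1")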
| 362 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path( test_file ):
    """simple docstring"""
    components =test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
            F'''{test_file} instead.''' )
    test_fn =components[-1]
    if not test_fn.endswith('''py''' ):
        raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' )
    if not test_fn.startswith('''test_modeling_''' ):
        raise ValueError(
            F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
    components =components[:-1] + [test_fn.replace('''.py''' , '''''' )]
    test_module_path ='''.'''.join(components )
    return test_module_path
def get_test_module( test_file ):
    """simple docstring"""
    test_module_path =get_module_path(test_file )
    test_module =importlib.import_module(test_module_path )
    return test_module
def get_tester_classes( test_file ):
    """simple docstring"""
    tester_classes =[]
    test_module =get_test_module(test_file )
    for attr in dir(test_module ):
        if attr.endswith('''ModelTester''' ):
            tester_classes.append(getattr(test_module , attr ) )
    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_classes( test_file ):
    """simple docstring"""
    test_classes =[]
    test_module =get_test_module(test_file )
    for attr in dir(test_module ):
        test_class =getattr(test_module , attr )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes =getattr(test_class , '''all_model_classes''' , [] )
        if len(model_classes ) > 0:
            test_classes.append(test_class )
    # sort with class names
    return sorted(test_classes , key=lambda x : x.__name__ )
def get_model_classes( test_file ):
    """simple docstring"""
    test_classes =get_test_classes(test_file )
    model_classes =set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(model_classes , key=lambda x : x.__name__ )
def get_model_tester_from_test_class( test_class ):
    """simple docstring"""
    test =test_class()
    if hasattr(test , '''setUp''' ):
        test.setUp()
    model_tester =None
    if hasattr(test , '''model_tester''' ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester =test.model_tester.__class__
    return model_tester
def get_test_classes_for_model( test_file , model_class ):
    """simple docstring"""
    test_classes =get_test_classes(test_file )
    target_test_classes =[]
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class )
    # sort with class names
    return sorted(target_test_classes , key=lambda x : x.__name__ )
def get_tester_classes_for_model( test_file , model_class ):
    """simple docstring"""
    test_classes =get_test_classes_for_model(test_file , model_class )
    tester_classes =[]
    for test_class in test_classes:
        tester_class =get_model_tester_from_test_class(test_class )
        if tester_class is not None:
            tester_classes.append(tester_class )
    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_to_tester_mapping( test_file ):
    """simple docstring"""
    test_classes =get_test_classes(test_file )
    test_tester_mapping ={test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping( test_file ):
    """simple docstring"""
    model_classes =get_model_classes(test_file )
    model_test_mapping ={
        model_class: get_test_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping( test_file ):
    """simple docstring"""
    model_classes =get_model_classes(test_file )
    model_to_tester_mapping ={
        model_class: get_tester_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json( o ):
    """simple docstring"""
    if isinstance(o , str ):
        return o
    elif isinstance(o , type ):
        return o.__name__
    elif isinstance(o , (list, tuple) ):
        return [to_json(x ) for x in o]
    elif isinstance(o , dict ):
        return {to_json(k ): to_json(v ) for k, v in o.items()}
    else:
        return o
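# NOTE (added example): a hedged usage sketch; the test file path is an
# assumption and must point at a real `tests/models/*/test_modeling_*.py`
# relative to the repository root.
# if __name__ == "__main__":
#     mapping = get_model_to_test_mapping("tests/models/bert/test_modeling_bert.py")
#     print(to_json(mapping))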
| 6 | 0 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow :
    def __init__( self, device="cpu", clip_model="openai/clip-vit-large-patch14" ):
        """simple docstring"""
        self.device =device
        self.tokenizer =CLIPTokenizerFast.from_pretrained(clip_model )
        self.image_mean =[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
        self.image_std =[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
        self.normalize =torchvision.transforms.Normalize(self.image_mean, self.image_std )
        self.resize =torchvision.transforms.Resize(224 )
        self.center_crop =torchvision.transforms.CenterCrop(224 )
    def preprocess_img( self, images ):
        """simple docstring"""
        images =self.resize(images )
        images =self.center_crop(images )
        images =self.normalize(images )
        return images
    def __call__( self, text=None, images=None, **kwargs ):
        """simple docstring"""
        encoding =self.tokenizer(text=text, **kwargs )
        encoding['''pixel_values'''] =self.preprocess_img(images )
        encoding ={key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP( nn.Module ):
    def __init__( self, iterations=10, lr=0.0_1, vqgan=None, vqgan_config=None, vqgan_checkpoint=None, clip=None, clip_preprocessor=None, device=None, log=False, save_vector=True, return_val="image", quantize=True, save_intermediate=False, show_intermediate=False, make_grid=False, ):
        """simple docstring"""
        super().__init__()
        self.latent =None
        self.device =device if device else get_device()
        if vqgan:
            self.vqgan =vqgan
        else:
            self.vqgan =load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint )
        self.vqgan.eval()
        if clip:
            self.clip =clip
        else:
            self.clip =CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.clip.to(self.device )
        self.clip_preprocessor =ProcessorGradientFlow(device=self.device )
        self.iterations =iterations
        self.lr =lr
        self.log =log
        self.make_grid =make_grid
        self.return_val =return_val
        self.quantize =quantize
        self.latent_dim =self.vqgan.decoder.z_shape
    def make_animation( self, input_path=None, output_path=None, total_duration=5, extend_frames=True ):
        """simple docstring"""
        images =[]
        if output_path is None:
            output_path ="./animation.gif"
        if input_path is None:
            input_path =self.save_path
        paths =sorted(glob(input_path + '''/*''' ) )
        if not len(paths ):
            raise ValueError(
                '''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
                ''' function?)''' )
        if len(paths ) == 1:
            print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
        frame_duration =total_duration / len(paths )
        durations =[frame_duration] * len(paths )
        if extend_frames:
            durations[0] =1.5
            durations[-1] =3
        for file_name in paths:
            if file_name.endswith('''.png''' ):
                images.append(imageio.imread(file_name ) )
        imageio.mimsave(output_path, images, duration=durations )
        print(f'''gif saved to {output_path}''' )
    def _get_latent( self, path=None, img=None ):
        """simple docstring"""
        if not (path or img):
            raise ValueError('''Input either path or tensor''' )
        if img is not None:
            raise NotImplementedError
        img =preprocess(Image.open(path ), target_image_size=256 ).to(self.device )
        img =preprocess_vqgan(img )
        z, *_ =self.vqgan.encode(img )
        return z
    def _add_vector( self, transform_vector ):
        """simple docstring"""
        base_latent =self.latent.detach().requires_grad_()
        trans_latent =base_latent + transform_vector
        if self.quantize:
            z_q, *_ =self.vqgan.quantize(trans_latent )
        else:
            z_q =trans_latent
        return self.vqgan.decode(z_q )
    def _get_clip_similarity( self, prompts, image, weights=None ):
        """simple docstring"""
        clip_inputs =self.clip_preprocessor(text=prompts, images=image, return_tensors='''pt''', padding=True )
        clip_outputs =self.clip(**clip_inputs )
        similarity_logits =clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits =similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss( self, pos_prompts, neg_prompts, image ):
        """simple docstring"""
        pos_logits =self._get_clip_similarity(pos_prompts['''prompts'''], image, weights=(1 / pos_prompts['''weights''']) )
        if neg_prompts:
            neg_logits =self._get_clip_similarity(neg_prompts['''prompts'''], image, weights=neg_prompts['''weights'''] )
        else:
            neg_logits =torch.tensor([1], device=self.device )
        loss =-torch.log(pos_logits ) + torch.log(neg_logits )
        return loss
    def _optimize_CLIP( self, original_img, pos_prompts, neg_prompts ):
        """simple docstring"""
        vector =torch.randn_like(self.latent, requires_grad=True, device=self.device )
        optim =torch.optim.Adam([vector], lr=self.lr )
        for i in range(self.iterations ):
            optim.zero_grad()
            transformed_img =self._add_vector(vector )
            processed_img =loop_post_process(transformed_img )
            clip_loss =self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img )
            print('''CLIP loss''', clip_loss )
            if self.log:
                wandb.log({'''CLIP Loss''': clip_loss} )
            clip_loss.backward(retain_graph=True )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector
    def _init_logging( self, positive_prompts, negative_prompts, image_path ):
        """simple docstring"""
        wandb.init(reinit=True, project='''face-editor''' )
        wandb.config.update({'''Positive Prompts''': positive_prompts} )
        wandb.config.update({'''Negative Prompts''': negative_prompts} )
        wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
        if image_path:
            image =Image.open(image_path )
            image =image.resize((256, 256) )
            wandb.log('''Original Image''', wandb.Image(image ) )
    def process_prompts( self, prompts ):
        """simple docstring"""
        if not prompts:
            return []
        processed_prompts =[]
        weights =[]
        if isinstance(prompts, str ):
            prompts =[prompt.strip() for prompt in prompts.split('''|''' )]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list) ):
                processed_prompt =prompt[0]
                weight =float(prompt[1] )
            elif ":" in prompt:
                processed_prompt, weight =prompt.split(''':''' )
                weight =float(weight )
            else:
                processed_prompt =prompt
                weight =1.0
            processed_prompts.append(processed_prompt )
            weights.append(weight )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device ),
        }
    def generate( self, pos_prompts, neg_prompts=None, image_path=None, show_intermediate=True, save_intermediate=False, show_final=True, save_final=True, save_path=None, ):
        """simple docstring"""
        if image_path:
            self.latent =self._get_latent(image_path )
        else:
            self.latent =torch.randn(self.latent_dim, device=self.device )
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path )
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts =self.process_prompts(pos_prompts )
        neg_prompts =self.process_prompts(neg_prompts )
        if save_final and save_path is None:
            save_path =os.path.join('''./outputs/''', '''_'''.join(pos_prompts['''prompts'''] ) )
            if not os.path.exists(save_path ):
                os.makedirs(save_path )
            else:
                save_path =save_path + "_" + get_timestamp()
                os.makedirs(save_path )
        self.save_path =save_path
        original_img =self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print('''Original Image''' )
            show_pil(custom_to_pil(original_img ) )
        original_img =loop_post_process(original_img )
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts ) ):
            if show_intermediate:
                show_pil(transformed_img )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f'''iter_{iter:03d}.png''' ) )
            if self.log:
                wandb.log({'''Image''': wandb.Image(transformed_img )} )
        if show_final:
            show_pil(transformed_img )
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f'''iter_{iter:03d}_final.png''' ) )
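# NOTE (added example): a minimal usage sketch of the class above. The VQGAN
# config/checkpoint paths, the input image and the weighted prompt string are
# illustrative assumptions.
# editor = VQGAN_CLIP(iterations=25, vqgan_config="./vqgan.yaml", vqgan_checkpoint="./vqgan.ckpt")
# editor.generate(pos_prompts="a smiling face:1.0|blue eyes:0.5", image_path="./face.png", show_intermediate=False)
# editor.make_animation(output_path="./face_edit.gif")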
| 363 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor( metaclass=DummyObject ):  # class names assumed to match the real dummy speech objects
    _backends =['speech']
    def __init__( self, *args, **kwargs ):
        """simple docstring"""
        requires_backends(self, ['''speech'''] )
class SpeechaTextFeatureExtractor( metaclass=DummyObject ):
    _backends =['speech']
    def __init__( self, *args, **kwargs ):
        """simple docstring"""
        requires_backends(self, ['''speech'''] )
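# NOTE (added): these placeholders let imports succeed without the optional
# backend; instantiating one without the `speech` extras installed raises an
# ImportError from `requires_backends` at call time instead of at import time.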
| 6 | 0 |
def bead_sort( sequence: list ) -> list:
    """simple docstring"""
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError('''Sequence must be list of non-negative integers''' )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
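# NOTE (added example): bead sort ("gravity sort") handles only non-negative
# integers and sorts the list in place:
# >>> bead_sort([3, 0, 2])
# [0, 2, 3]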
| 364 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor( ProcessorMixin ):
    attributes =['image_processor', 'tokenizer']
    image_processor_class ='AutoImageProcessor'
    tokenizer_class ='AutoTokenizer'
    def __init__( self, image_processor=None, tokenizer=None, **kwargs ):
        """simple docstring"""
        feature_extractor =None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', FutureWarning, )
            feature_extractor =kwargs.pop('''feature_extractor''' )
        image_processor =image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor, tokenizer )
        self.current_processor =self.image_processor
        self._in_target_context_manager =False
    def __call__( self, *args, **kwargs ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs )
        images =kwargs.pop('''images''', None )
        text =kwargs.pop('''text''', None )
        if len(args ) > 0:
            images =args[0]
            args =args[1:]
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
        if images is not None:
            inputs =self.image_processor(images, *args, **kwargs )
        if text is not None:
            encodings =self.tokenizer(text, **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['''labels'''] =encodings['''input_ids''']
            return inputs
    def batch_decode( self, *args, **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs )
    def decode( self, *args, **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs )
    @contextmanager
    def as_target_processor( self ):
        """simple docstring"""
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your images inputs, or in a separate call.''' )
        self._in_target_context_manager =True
        self.current_processor =self.tokenizer
        yield
        self.current_processor =self.image_processor
        self._in_target_context_manager =False
    def token2json( self, tokens, is_inner_value=False, added_vocab=None ):
        """simple docstring"""
        if added_vocab is None:
            added_vocab =self.tokenizer.get_added_vocab()
        output ={}
        while tokens:
            start_token =re.search(R'''<s_(.*?)>''', tokens, re.IGNORECASE )
            if start_token is None:
                break
            key =start_token.group(1 )
            end_token =re.search(Rf'''</s_{key}>''', tokens, re.IGNORECASE )
            start_token =start_token.group()
            if end_token is None:
                tokens =tokens.replace(start_token, '''''' )
            else:
                end_token =end_token.group()
                start_token_escaped =re.escape(start_token )
                end_token_escaped =re.escape(end_token )
                content =re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''', tokens, re.IGNORECASE )
                if content is not None:
                    content =content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value =self.token2json(content, is_inner_value=True, added_vocab=added_vocab )
                        if value:
                            if len(value ) == 1:
                                value =value[0]
                            output[key] =value
                    else:  # leaf nodes
                        output[key] =[]
                        for leaf in content.split(R'''<sep/>''' ):
                            leaf =leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf =leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf )
                        if len(output[key] ) == 1:
                            output[key] =output[key][0]
                tokens =tokens[tokens.find(end_token ) + len(end_token ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab )
        if len(output ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', FutureWarning, )
        return self.image_processor
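# NOTE (added example): a hedged illustration of `token2json` on a toy Donut
# output sequence; the tag names are invented for the example.
# >>> processor.token2json("<s_menu><s_nm>Latte</s_nm></s_menu>")
# {'menu': {'nm': 'Latte'}}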
| 6 | 0 |
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs( model ):
    """simple docstring"""
    original_config =model.config
    encoder_config =DonutSwinConfig(
        image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
    decoder_config =MBartConfig(
        is_decoder=True , is_encoder_decoder=False , add_cross_attention=True , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
            model.decoder.tokenizer ) , scale_embedding=True , add_final_layer_norm=True , )
    return encoder_config, decoder_config
def rename_key( name ):
"""simple docstring"""
if "encoder.model" in name:
lowerCamelCase_ =name.replace('''encoder.model''' , '''encoder''' )
if "decoder.model" in name:
lowerCamelCase_ =name.replace('''decoder.model''' , '''decoder''' )
if "patch_embed.proj" in name:
lowerCamelCase_ =name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase_ =name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
lowerCamelCase_ ='''encoder.''' + name
if "attn.proj" in name:
lowerCamelCase_ =name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
lowerCamelCase_ =name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowerCamelCase_ =name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowerCamelCase_ =name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowerCamelCase_ =name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCamelCase_ =name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
lowerCamelCase_ ='''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
lowerCamelCase_ ='''encoder.layernorm.bias'''
return name
def convert_state_dict( orig_state_dict , model ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val =orig_state_dict.pop(key )
        if "qkv" in key:
            key_split =key.split('''.''' )
            layer_num =int(key_split[3] )
            block_num =int(key_split[5] )
            dim =model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    F'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'''
                ] =val[:dim, :]
                orig_state_dict[
                    F'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'''
                ] =val[dim : dim * 2, :]
                orig_state_dict[
                    F'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'''
                ] =val[-dim:, :]
            else:
                orig_state_dict[
                    F'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'''
                ] =val[:dim]
                orig_state_dict[
                    F'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'''
                ] =val[dim : dim * 2]
                orig_state_dict[
                    F'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'''
                ] =val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key )] =val
    return orig_state_dict
def convert_donut_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    """simple docstring"""
    # load original model
    original_model =DonutModel.from_pretrained(model_name ).eval()
    # load HuggingFace model
    encoder_config, decoder_config =get_configs(original_model )
    encoder =DonutSwinModel(encoder_config )
    decoder =MBartForCausalLM(decoder_config )
    model =VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    state_dict =original_model.state_dict()
    new_state_dict =convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # verify results on scanned document
    dataset =load_dataset('''hf-internal-testing/example-documents''' )
    image =dataset['''test'''][0]['''image'''].convert('''RGB''' )
    tokenizer =XLMRobertaTokenizerFast.from_pretrained(model_name , from_slow=True )
    image_processor =DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
    processor =DonutProcessor(image_processor , tokenizer )
    pixel_values =processor(image , return_tensors='''pt''' ).pixel_values
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt ='''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        question ='''When is the coffee break?'''
        task_prompt =task_prompt.replace('''{user_input}''' , question )
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt ='''<s_rvlcdip>'''
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt ='''<s_cord>'''
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt ='''s_cord-v2>'''
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt ='''<s_zhtrainticket>'''
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt ='''hello world'''
    else:
        raise ValueError('''Model name not supported''' )
    prompt_tensors =original_model.decoder.tokenizer(task_prompt , add_special_tokens=False , return_tensors='''pt''' )[
        '''input_ids'''
    ]
    original_patch_embed =original_model.encoder.model.patch_embed(pixel_values )
    patch_embeddings, _ =model.encoder.embeddings(pixel_values )
    assert torch.allclose(original_patch_embed , patch_embeddings , atol=1e-3 )
    # verify encoder hidden states
    original_last_hidden_state =original_model.encoder(pixel_values )
    last_hidden_state =model.encoder(pixel_values ).last_hidden_state
    assert torch.allclose(original_last_hidden_state , last_hidden_state , atol=1e-2 )
    # verify decoder hidden states
    original_logits =original_model(pixel_values , prompt_tensors , None ).logits
    logits =model(pixel_values , decoder_input_ids=prompt_tensors ).logits
    assert torch.allclose(original_logits , logits , atol=1e-3 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
        processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""naver-clova-ix/donut-base-finetuned-docvqa""",
required=False,
type=str,
help="""Name of the original model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
required=False,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub.""",
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
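# NOTE (added example): typical invocation; the script filename and output path
# are illustrative.
# python convert_donut_to_pytorch.py --model_name naver-clova-ix/donut-base --pytorch_dump_folder_path ./donut-base-converted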
| 365 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers import ShapEImgaImgPipeline as ShapEImg2ImgPipeline  # alias kept so both spellings resolve below
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImgaImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class =ShapEImgaImgPipeline
    params =['image']
    batch_params =['image']
    required_optional_params =[
        'num_images_per_prompt',
        'num_inference_steps',
        'generator',
        'latents',
        'guidance_scale',
        'frame_size',
        'output_type',
        'return_dict',
    ]
    test_gpu_offload =False
    @property
    def text_embedder_hidden_size( self ):
        """simple docstring"""
        return 32
    @property
    def time_input_dim( self ):
        """simple docstring"""
        return 32
    @property
    def time_embed_dim( self ):
        """simple docstring"""
        return self.time_input_dim * 4
    @property
    def renderer_dim( self ):
        """simple docstring"""
        return 8
@property
    def dummy_image_encoder( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        config =CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
        model =CLIPVisionModel(config )
        return model
@property
    def dummy_image_processor( self ):
        """simple docstring"""
        image_processor =CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], resample=3, size=224, )
return image_processor
@property
    def dummy_prior( self ):
"""simple docstring"""
torch.manual_seed(0 )
        model_kwargs ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
        model =PriorTransformer(**model_kwargs )
return model
@property
    def dummy_renderer( self ):
"""simple docstring"""
torch.manual_seed(0 )
        model_kwargs ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
        model =ShapERenderer(**model_kwargs )
return model
    def get_dummy_components( self ):
        """simple docstring"""
        prior =self.dummy_prior
        image_encoder =self.dummy_image_encoder
        image_processor =self.dummy_image_processor
        renderer =self.dummy_renderer
        scheduler =HeunDiscreteScheduler(
            beta_schedule='''exp''', num_train_timesteps=1_024, prediction_type='''sample''', use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
    def get_dummy_inputs( self, device, seed=0 ):
        """simple docstring"""
        input_image =floats_tensor((1, 3, 64, 64), rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('''mps''' ):
            generator =torch.manual_seed(seed )
        else:
            generator =torch.Generator(device=device ).manual_seed(seed )
        inputs ={
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array([0.00039216] * 9)  # nine identical reference values
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # Release GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
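# The fast tests run under plain pytest; the integration test above is marked
# `@slow` and is skipped unless the RUN_SLOW=1 environment variable is set:
#
#     RUN_SLOW=1 python -m pytest tests/pipelines/shap_e/test_shap_e_img2img.py
#
# The file path here is an assumption based on the conventional diffusers layout.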
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
    from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # Binarize the logits into a {0, 1} mask, then scale it to an 8-bit image.
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
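# Minimal usage sketch, assuming the "CIDAS/clipseg-rd64-refined" checkpoint is
# downloadable and "cat.png" stands in for a local image file:
#
#     from PIL import Image
#
#     tool = ImageSegmentationTool()
#     mask = tool(image=Image.open("cat.png"), label="cat")
#     mask.save("cat_mask.png")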
"""
Project Euler problem 205 (https://projecteuler.net/problem=205):
the probability that Peter's nine four-sided dice beat Colin's six six-sided dice.
"""
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Return, for every possible total, how many ordered rolls produce it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies
def solution() -> float:
    """Probability, rounded to 7 decimal places, that Peter's total beats Colin's."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        # Peter wins whenever Colin's total is strictly smaller.
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
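    # Illustrative sanity check (not part of the original solution): with two
    # six-sided dice, exactly six of the 36 ordered outcomes total 7.
    frequencies = total_frequency_distribution(sides_number=6, dice_number=2)
    assert frequencies[7] == 6
    assert sum(frequencies) == 6**2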
"""Convert a positive decimal integer to a string in any base from 2 to 36."""
from string import ascii_uppercase

# Digits above 9 are written as letters: "10" -> "A", ..., "35" -> "Z".
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    """Convert ``num`` to ``base`` and return the digits as a string."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])

    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(10_00):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
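# Worked example (illustrative): 255 in base 16.
# divmod(255, 16) -> (15, 15) gives the low digit "F"; the remaining 15 yields
# a second "F", and the final reversal produces "FF".
#
#     assert decimal_to_any(255, 16) == "FF"
#     assert decimal_to_any(5, 4) == "11"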
"""Type aliases for nested data structures and filesystem paths."""
import os
from typing import Dict, List, Tuple, TypeVar, Union

T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
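# Illustrative use of the aliases above (`read_files` is a hypothetical name):
#
#     def read_files(paths: NestedDataStructureLike[PathLike]) -> None:
#         """Accept one path, a list/tuple of paths, or a dict of paths."""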